HarriziSaad committed on
Commit
f07511a
·
verified ·
1 Parent(s): e639bad

Upload 14 files

Browse files
scripts/analyze_and_filter_mito_dataset.py.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
"""Filter the processed mutation dataset down to a strict mitochondrial gene set.

Reads ``mutations_dataset_final.parquet`` from ``<BASE_PATH>/data/processed``,
keeps only rows whose ``gene_symbol`` is in the curated STRICT_MITO_GENES
whitelist, and writes the filtered table back as both Parquet and TSV.

Originally generated by Colab:
https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""

import os
from pathlib import Path

import pandas as pd

# FIX: BASE_PATH was referenced below but never defined, so the script raised
# NameError on its first statement. It is now env-overridable so the script
# runs both on Colab (Drive mount) and locally.
BASE_PATH = Path(os.environ.get('MITO_BASE_PATH', '.'))

df = pd.read_parquet(BASE_PATH / 'data' / 'processed' / 'mutations_dataset_final.parquet')

# Curated whitelist of nuclear-encoded genes with established mitochondrial
# roles (dynamics, OXPHOS assembly/subunits, mtDNA maintenance, mitochondrial
# translation, protein import, Fe-S cluster and CoQ biosynthesis, carriers).
STRICT_MITO_GENES = {
    'OPA1', 'MFN1', 'MFN2', 'DNM1L', 'FIS1',
    'AFG3L2', 'SPG7', 'LONP1', 'CLPP', 'YME1L1', 'OMA1', 'HTRA2',
    'NDUFAF1', 'NDUFAF2', 'NDUFAF3', 'NDUFAF4', 'NDUFAF5', 'NDUFAF6', 'NDUFAF7',
    'NUBPL', 'ACAD9', 'TIMMDC1', 'FOXRED1',
    'NDUFS1', 'NDUFS2', 'NDUFS3', 'NDUFS4', 'NDUFS6', 'NDUFS7', 'NDUFS8',
    'NDUFV1', 'NDUFV2', 'NDUFA1', 'NDUFA2', 'NDUFA9', 'NDUFA10', 'NDUFA11', 'NDUFA12', 'NDUFA13',
    'NDUFB3', 'NDUFB8', 'NDUFB9', 'NDUFB10', 'NDUFB11',
    'SDHA', 'SDHB', 'SDHC', 'SDHD', 'SDHAF1', 'SDHAF2',
    'BCS1L', 'TTC19', 'UQCRB', 'UQCRQ', 'UQCRC2', 'CYC1',
    'SURF1', 'SCO1', 'SCO2', 'COX10', 'COX14', 'COX15', 'COX20',
    'COA5', 'COA6', 'COA7', 'PET100', 'COX4I1', 'COX6A1', 'COX6B1', 'COX7B', 'COX8A',
    'ATP5F1A', 'ATP5F1D', 'ATP5F1E', 'TMEM70', 'ATPAF2',
    'TIMM50', 'TIMM8A', 'DNAJC19', 'AGK', 'TOMM20', 'TOMM40',
    'CHCHD2', 'CHCHD10', 'CHCHD4', 'AIFM1', 'COX17',
    'HSPA9', 'HSPD1', 'HSPE1', 'CLPB',
    'AARS2', 'DARS2', 'EARS2', 'FARS2', 'HARS2', 'IARS2', 'LARS2', 'MARS2',
    'NARS2', 'RARS2', 'SARS2', 'TARS2', 'VARS2', 'YARS2',
    'GFM1', 'TSFM', 'TUFM', 'C12orf65', 'RMND1', 'GTPBP3', 'MTO1', 'TRMU',
    'MRPS16', 'MRPS22', 'MRPL3', 'MRPL12', 'MRPL44',
    'POLG', 'POLG2', 'TWNK', 'TFAM', 'RRM2B', 'MPV17', 'DGUOK', 'TK2',
    'SUCLA2', 'SUCLG1', 'FBXL4',
    'PDHA1', 'PDHB', 'PDHX', 'DLD', 'DLAT',
    'PC', 'PCCA', 'PCCB', 'MUT', 'MMAA', 'MMAB', 'MMACHC',
    'LIAS', 'LIPT1', 'BOLA3', 'NFU1', 'ISCA1', 'ISCA2', 'IBA57', 'GLRX5', 'FDXR',
    'COQ2', 'COQ4', 'COQ6', 'COQ7', 'COQ8A', 'COQ9', 'PDSS1', 'PDSS2',
    'SLC25A4', 'SLC25A3', 'SLC25A12', 'SLC25A13', 'SLC25A19', 'SLC25A22',
    'TAZ', 'SERAC1', 'LRPPRC', 'TACO1', 'ELAC2', 'TRNT1', 'PNPT1',
}

# Keep only mutations in the strict gene set; copy() detaches from the parent
# frame so later writes do not trigger SettingWithCopy warnings.
df_mito_strict = df[df['gene_symbol'].isin(STRICT_MITO_GENES)].copy()

df_mito_strict.to_parquet(
    BASE_PATH / 'data' / 'processed' / 'mutations_dataset_mito_strict.parquet'
)
df_mito_strict.to_csv(
    BASE_PATH / 'data' / 'processed' / 'mutations_dataset_mito_strict.tsv',
    sep='\t',
    index=False
)
scripts/analyze_bias_and_train_strict_mito.py.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
"""Analyse length bias and train a classifier on the strict mitochondrial subset.

Loads the full classical feature table, separates length-related features,
runs a leave-protein-out cross-validation (LPOCV) on the strict-mito feature
table, then fits a final GradientBoosting model on all strict-mito samples
and pickles it together with its scaler and LPOCV metrics.

Originally generated by Colab:
https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""

import os
import pickle
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_auc_score, average_precision_score, roc_curve, precision_recall_curve
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm

# FIX: BASE_PATH was referenced below but never defined (NameError).
BASE_PATH = Path(os.environ.get('MITO_BASE_PATH', '.'))

PATHS = {
    'features': BASE_PATH / 'features',
    'models': BASE_PATH / 'models',
    'results': BASE_PATH / 'results',
    'figures': BASE_PATH / 'results' / 'figures',
}

PATHS['figures'].mkdir(parents=True, exist_ok=True)

df_full = pd.read_parquet(PATHS['features'] / 'features_classical_full.parquet')

# Identifier columns are excluded from the feature matrix.
id_cols = ['mutation_idx', 'uniprot_acc', 'gene_symbol', 'position', 'wt_aa', 'mut_aa', 'label']
feature_cols_all = [c for c in df_full.columns if c not in id_cols]

# Length-related features are split out to probe protein-length bias.
length_related = ['protein_length', 'position_absolute', 'distance_to_n_term', 'distance_to_c_term']
feature_cols_no_length = [c for c in feature_cols_all if c not in length_related]

print(f" Features totales: {len(feature_cols_all)}")
print(f" Features sans longueur: {len(feature_cols_no_length)}")

X_all = df_full[feature_cols_all].values
X_no_length = df_full[feature_cols_no_length].values
y = df_full['label'].values

# Replace NaN/inf with 0 so scalers and trees never see non-finite values.
X_all = np.nan_to_num(X_all, nan=0.0, posinf=0.0, neginf=0.0)
X_no_length = np.nan_to_num(X_no_length, nan=0.0, posinf=0.0, neginf=0.0)

proteins = df_full['uniprot_acc'].unique()


def quick_lpocv(X, y, proteins_list, df, max_proteins=100):
    """Quick leave-protein-out CV over a random sample of proteins.

    Trains a small GradientBoosting model with each sampled protein held out
    in turn and pools the held-out predictions. Returns the pooled AUC-ROC,
    or 0 when it cannot be computed (no predictions / single class).
    """
    results = []

    np.random.seed(42)
    sample_proteins = np.random.choice(proteins_list, size=min(max_proteins, len(proteins_list)), replace=False)

    for protein in tqdm(sample_proteins, desc="LPOCV rapide"):
        test_mask = df['uniprot_acc'] == protein
        train_mask = ~test_mask

        # Need at least 2 held-out mutations for a meaningful fold.
        if test_mask.sum() < 2:
            continue

        X_train, y_train = X[train_mask], y[train_mask]
        X_test, y_test = X[test_mask], y[test_mask]

        # Scaler is fit on the training fold only to avoid leakage.
        scaler = StandardScaler()
        X_train_s = scaler.fit_transform(X_train)
        X_test_s = scaler.transform(X_test)

        model = GradientBoostingClassifier(n_estimators=50, max_depth=3, random_state=42)
        model.fit(X_train_s, y_train)

        y_pred = model.predict_proba(X_test_s)[:, 1]

        for pred, true in zip(y_pred, y_test):
            results.append({'y_true': true, 'y_pred': pred})

    df_res = pd.DataFrame(results)
    if len(df_res) > 0 and len(df_res['y_true'].unique()) > 1:
        return roc_auc_score(df_res['y_true'], df_res['y_pred'])
    return 0


# --- Strict mitochondrial subset ------------------------------------------
df_strict = pd.read_parquet(PATHS['features'] / 'features_classical_mito_strict.parquet')

print(f" Mutations: {len(df_strict):,}")
print(f" Pathogènes: {(df_strict['label']==1).sum():,}")
print(f" Bénins: {(df_strict['label']==0).sum():,}")

X_strict = df_strict[feature_cols_all].values
y_strict = df_strict['label'].values
X_strict = np.nan_to_num(X_strict, nan=0.0, posinf=0.0, neginf=0.0)

proteins_strict = df_strict['uniprot_acc'].unique()
print(f" Protéines: {len(proteins_strict)}")

# Full LPOCV on the strict subset: every protein held out once.
lpocv_strict_results = []

for protein in tqdm(proteins_strict, desc="LPOCV strict"):
    test_mask = df_strict['uniprot_acc'] == protein
    train_mask = ~test_mask

    if test_mask.sum() < 2:
        continue

    X_train = X_strict[train_mask]
    y_train = y_strict[train_mask]
    X_test = X_strict[test_mask]
    y_test = y_strict[test_mask]

    scaler = StandardScaler()
    X_train_s = scaler.fit_transform(X_train)
    X_test_s = scaler.transform(X_test)

    model = GradientBoostingClassifier(n_estimators=100, max_depth=4, learning_rate=0.1, random_state=42)
    model.fit(X_train_s, y_train)

    y_pred = model.predict_proba(X_test_s)[:, 1]

    for pred, true in zip(y_pred, y_test):
        lpocv_strict_results.append({
            'protein': protein,
            'y_true': true,
            'y_pred': pred,
        })

df_lpocv_strict = pd.DataFrame(lpocv_strict_results)

# Pooled metrics over all held-out predictions; 0 when undefined.
if len(df_lpocv_strict) > 0 and len(df_lpocv_strict['y_true'].unique()) > 1:
    auc_roc_strict = roc_auc_score(df_lpocv_strict['y_true'], df_lpocv_strict['y_pred'])
    auc_pr_strict = average_precision_score(df_lpocv_strict['y_true'], df_lpocv_strict['y_pred'])

    print(f" AUC-ROC: {auc_roc_strict:.4f}")
    print(f" AUC-PR: {auc_pr_strict:.4f}")
else:
    auc_roc_strict = 0
    auc_pr_strict = 0

# Final model: fit on ALL strict-mito samples (scaler fit on the same data,
# since this model is only evaluated through the LPOCV metrics above).
scaler_strict = StandardScaler()
X_strict_scaled = scaler_strict.fit_transform(X_strict)

model_strict = GradientBoostingClassifier(
    n_estimators=300,
    max_depth=5,
    learning_rate=0.05,
    min_samples_leaf=5,
    subsample=0.8,
    random_state=42
)

model_strict.fit(X_strict_scaled, y_strict)

importance_strict = pd.DataFrame({
    'feature': feature_cols_all,
    'importance': model_strict.feature_importances_
}).sort_values('importance', ascending=False)

print("\n Top 10 features (strict mito):")
for i, row in importance_strict.head(10).iterrows():
    print(f" {row['importance']:.4f} {row['feature']}")

# Bundle model + scaler + metadata so downstream code can score new samples.
model_strict_data = {
    'model': model_strict,
    'scaler': scaler_strict,
    'feature_cols': feature_cols_all,
    'metrics': {
        'auc_roc_lpocv': auc_roc_strict,
        'auc_pr_lpocv': auc_pr_strict,
    },
    'n_samples': len(X_strict),
}

with open(PATHS['models'] / 'model_classical_mito_strict.pkl', 'wb') as f:
    pickle.dump(model_strict_data, f)

# Reference metrics from the full-dataset LPOCV for comparison.
df_lpocv_full = pd.read_parquet(PATHS['results'] / 'lpocv_predictions.parquet')
auc_roc_full = roc_auc_score(df_lpocv_full['y_true'], df_lpocv_full['y_pred_proba'])
auc_pr_full = average_precision_score(df_lpocv_full['y_true'], df_lpocv_full['y_pred_proba'])
scripts/build_clinvar_mito_dataset.py.py ADDED
@@ -0,0 +1,333 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
"""Build the mitochondrial ClinVar mutation dataset.

Parses a local ClinVar ``variation_summary.txt.gz`` dump, keeps
pathogenic/benign missense variants in mitochondrial genes or with
mitochondrial phenotypes, validates each variant against UniProt sequences,
merges with the existing benign backbone and writes the final dataset.

Originally generated by Colab:
https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""

import gzip
import os
import re
from pathlib import Path

import numpy as np
import pandas as pd
from tqdm import tqdm

# FIX: BASE_PATH was referenced below but never defined (NameError).
BASE_PATH = Path(os.environ.get('MITO_BASE_PATH', '.'))

PATHS = {
    'data_raw': BASE_PATH / 'data' / 'raw',
    'data_processed': BASE_PATH / 'data' / 'processed',
    'checkpoints': BASE_PATH / 'models' / 'checkpoints',
}

for path in PATHS.values():
    path.mkdir(parents=True, exist_ok=True)

# Three-letter -> one-letter codes for the 20 standard amino acids.
AA_3TO1 = {
    'Ala': 'A', 'Arg': 'R', 'Asn': 'N', 'Asp': 'D', 'Cys': 'C',
    'Glu': 'E', 'Gln': 'Q', 'Gly': 'G', 'His': 'H', 'Ile': 'I',
    'Leu': 'L', 'Lys': 'K', 'Met': 'M', 'Phe': 'F', 'Pro': 'P',
    'Ser': 'S', 'Thr': 'T', 'Trp': 'W', 'Tyr': 'Y', 'Val': 'V'
}

# --- 1. Load and filter the raw ClinVar dump ------------------------------
clinvar_file = Path("/content/drive/MyDrive/clinvar/variation_summary.txt.gz")

if clinvar_file.exists():
    print(f" Fichier: {clinvar_file}")

    with gzip.open(clinvar_file, "rt") as f:
        df_clinvar_raw = pd.read_csv(f, sep="\t", low_memory=False)

    print(f" Lignes totales: {len(df_clinvar_raw):,}")
    print(f" Colonnes: {df_clinvar_raw.columns.tolist()[:10]}...")

    # Keep rows with a gene, a protein change, and a Pathogenic/Benign call.
    df_clinvar = df_clinvar_raw[
        df_clinvar_raw["GeneSymbol"].notna() &
        df_clinvar_raw["ProteinChange"].notna() &
        df_clinvar_raw["ClinicalSignificance"].str.contains("Pathogenic|Benign", case=False, na=False)
    ].copy()

    print(f" Après filtre basique: {len(df_clinvar):,}")

    MITO_KEYWORDS = [
        "mitochondrial", "Leigh", "MELAS", "MERRF", "NARP", "LHON",
        "optic atrophy", "OXPHOS", "complex I", "complex II", "complex III",
        "complex IV", "complex V", "cardiomyopathy", "encephalopathy",
        "myopathy", "aminoacyl-tRNA", "respiratory chain"
    ]

    pattern = "|".join(MITO_KEYWORDS)

    MITO_GENES = [
        'OPA1', 'MFN1', 'MFN2', 'DNM1L', 'AFG3L2', 'SPG7',
        'SURF1', 'SCO1', 'SCO2', 'COX10', 'COX15', 'COX6B1',
        'NDUFAF1', 'NDUFAF2', 'NDUFAF3', 'NDUFAF4', 'NDUFAF5', 'NDUFAF6',
        'NUBPL', 'ACAD9', 'TIMMDC1', 'FOXRED1',
        'CHCHD10', 'CHCHD2', 'TIMM50', 'DNAJC19', 'AGK',
        'HARS2', 'IARS2', 'LARS2', 'MARS2', 'RARS2', 'VARS2', 'YARS2',
        'DARS2', 'SARS2', 'TARS2', 'AARS2', 'EARS2', 'FARS2', 'NARS2', 'PARS2',
        'POLG', 'POLG2', 'TWNK', 'RRM2B', 'MPV17', 'DGUOK', 'TK2',
        'SUCLA2', 'SUCLG1', 'FBXL4', 'SLC25A4', 'SLC25A3',
        'RMND1', 'GTPBP3', 'MTO1', 'TRMU', 'TSFM', 'GFM1', 'C12orf65',
        'LRPPRC', 'TACO1', 'MTFMT', 'ELAC2',
        'BCS1L', 'TTC19', 'UQCRQ', 'UQCRB', 'UQCRC2',
        'COA5', 'COA6', 'COA7', 'PET100', 'PET117',
        'TMEM70', 'ATP5F1A', 'ATP5F1D', 'ATP5F1E',
    ]

    # Mitochondrial if the phenotype matches OR the gene is whitelisted.
    df_mito = df_clinvar[
        df_clinvar["PhenotypeList"].str.contains(pattern, case=False, na=False) |
        df_clinvar["GeneSymbol"].str.upper().isin([g.upper() for g in MITO_GENES])
    ].copy()

    print(f" Variants mitochondriaux: {len(df_mito):,}")

else:
    print(" non trouvé")
    print(" → Téléchargez depuis: https://ftp.ncbi.nlm.nih.gov/pub/clinvar/tab_delimited/")
    df_mito = pd.DataFrame()

# --- 2. Parse protein changes into (wt, position, mut) --------------------
records = []

if len(df_mito) > 0:
    for _, row in tqdm(df_mito.iterrows(), total=len(df_mito), desc="Parsing"):
        protein_change = str(row.get("ProteinChange", ""))

        # FIX: the original code built a fake match object whose ``groups``
        # entry was a zero-argument lambda stored in a class dict; calling
        # ``match.groups()`` passed the instance as ``self`` and raised
        # TypeError for every 3-letter variant. Parse into plain variables.
        wt = pos = mut = None

        match = re.search(r'p\.([A-Z])(\d+)([A-Z])', protein_change)
        if match:
            wt, pos, mut = match.groups()
        else:
            match = re.search(r'p\.([A-Z][a-z]{2})(\d+)([A-Z][a-z]{2})', protein_change)
            if match:
                wt_3, pos, mut_3 = match.groups()
                wt = AA_3TO1.get(wt_3)
                mut = AA_3TO1.get(mut_3)

        if not (wt and mut and pos):
            continue

        clin_sig = str(row.get("ClinicalSignificance", "")).lower()

        # Keep only unambiguous calls; skip conflicting/mixed annotations.
        if "pathogenic" in clin_sig and "benign" not in clin_sig:
            label = 1
        elif "benign" in clin_sig and "pathogenic" not in clin_sig:
            label = 0
        else:
            continue

        review = str(row.get("ReviewStatus", ""))

        records.append({
            "gene_symbol": str(row["GeneSymbol"]).upper(),
            "position": int(pos) - 1,  # ClinVar is 1-based; store 0-based
            "wt_aa": wt,
            "mut_aa": mut,
            "label": label,
            "source": "ClinVar_local",
            "review_status": review,
            "clinical_significance": row.get("ClinicalSignificance", ""),
            "phenotype": str(row.get("PhenotypeList", ""))[:100],
        })

df_clinvar_parsed = pd.DataFrame(records)

print(f"\n ✓ Variants parsés: {len(df_clinvar_parsed)}")

if len(df_clinvar_parsed) > 0:
    print(f"\n Labels:")
    print(df_clinvar_parsed["label"].value_counts())

    print(f"\n Top 15 gènes:")
    print(df_clinvar_parsed["gene_symbol"].value_counts().head(15))

    print(f"\n Review status:")
    print(df_clinvar_parsed["review_status"].value_counts().head(5))

# --- 3. Build sequence / gene lookup tables from UniProt ------------------
uniprot_file = PATHS['data_raw'] / 'uniprot_mito_extended.parquet'
proteins_file = PATHS['data_processed'] / 'proteins_targeted.parquet'

seq_dict = {}       # accession -> sequence
gene_to_acc = {}    # upper-cased gene symbol -> accession
acc_to_info = {}    # accession -> extra annotations

if uniprot_file.exists():
    df_uniprot = pd.read_parquet(uniprot_file)
    for _, row in df_uniprot.iterrows():
        acc = row['accession']
        seq = row['sequence']
        gene = str(row['gene_name']).upper() if row['gene_name'] else ''

        seq_dict[acc] = seq
        acc_to_info[acc] = {
            'cysteine_fraction': row.get('cysteine_fraction', seq.count('C')/len(seq) if seq else 0),
            'mito_region': row.get('mito_region', 'Unknown'),
        }

        if gene:
            gene_to_acc[gene] = acc
            # Also index the hyphen-less alias to absorb naming variants.
            gene_to_acc[gene.replace('-', '')] = acc

    print(f" Protéines UniProt: {len(df_uniprot)}")

if proteins_file.exists():
    df_proteins = pd.read_parquet(proteins_file)
    for _, row in df_proteins.iterrows():
        acc = row['accession']
        seq = row['sequence']
        gene = row['gene_symbol'].upper()

        # UniProt entries take precedence; only fill gaps.
        if acc not in seq_dict:
            seq_dict[acc] = seq
            gene_to_acc[gene] = acc

    print(f" Protéines : {len(df_proteins)}")

print(f" séquences: {len(seq_dict)}")
print(f" Gènes : {len(gene_to_acc)}")

# --- 4. Validate parsed variants against the sequences --------------------
validated = []
not_found_genes = set()
seq_mismatch = 0

for _, row in tqdm(df_clinvar_parsed.iterrows(), total=len(df_clinvar_parsed), desc="Validation"):
    gene = row['gene_symbol']

    acc = gene_to_acc.get(gene)

    # Fallback aliases: strip hyphens / suffixes before giving up.
    if not acc:
        for variant in [gene.replace('-', ''), gene.split('-')[0], gene.split('_')[0]]:
            if variant in gene_to_acc:
                acc = gene_to_acc[variant]
                break

    if not acc:
        not_found_genes.add(gene)
        continue

    seq = seq_dict.get(acc, '')
    if not seq:
        continue

    pos = row['position']
    wt = row['wt_aa']
    mut = row['mut_aa']

    # Accept only variants whose wild-type residue matches the sequence.
    if 0 <= pos < len(seq):
        if seq[pos] == wt:
            info = acc_to_info.get(acc, {})

            validated.append({
                'uniprot_acc': acc,
                'gene_symbol': gene,
                'position': pos,
                'wt_aa': wt,
                'mut_aa': mut,
                'label': row['label'],
                'source': row['source'],
                'review_status': row.get('review_status', ''),
                'clinical_significance': row.get('clinical_significance', ''),
                'phenotype': row.get('phenotype', ''),
                'cysteine_fraction': info.get('cysteine_fraction', 0),
                'mito_region': info.get('mito_region', 'Unknown'),
            })
        else:
            seq_mismatch += 1

df_validated = pd.DataFrame(validated)

if len(df_validated) > 0:
    print(f"\n Labels validés:")
    print(df_validated["label"].value_counts())

# --- 5. Merge with the existing benign backbone ---------------------------
benign_file = PATHS['data_processed'] / 'mutations_master.parquet'

if benign_file.exists():
    df_benign_existing = pd.read_parquet(benign_file)
    print(f" Socle bénin existant: {len(df_benign_existing)}")

    # Prepare for merging: everything in the backbone is labelled benign.
    df_benign_existing = df_benign_existing.copy()
    df_benign_existing['label'] = 0
    df_benign_existing['source'] = df_benign_existing.get('label_source', 'gnomAD_UniProt')

else:
    df_benign_existing = pd.DataFrame()

datasets = []

if len(df_validated) > 0:
    datasets.append(df_validated)
    print(f" + ClinVar: {len(df_validated)}")

if len(df_benign_existing) > 0:
    cols = ['uniprot_acc', 'position', 'wt_aa', 'mut_aa', 'label', 'source']
    cols_exist = [c for c in cols if c in df_benign_existing.columns]
    df_benign_clean = df_benign_existing[cols_exist].copy()

    if 'gene_symbol' not in df_benign_clean.columns and 'gene_symbol' in df_benign_existing.columns:
        df_benign_clean['gene_symbol'] = df_benign_existing['gene_symbol']

    datasets.append(df_benign_clean)
    print(f" + Bénins existants: {len(df_benign_clean)}")

if datasets:
    df_final = pd.concat(datasets, ignore_index=True)

    # Deduplicate on (accession, position, mutant) and prefer ClinVar rows.
    df_final['mutation_key'] = (
        df_final['uniprot_acc'].astype(str) + '_' +
        df_final['position'].astype(str) + '_' +
        df_final['mut_aa'].astype(str)
    )

    df_final['priority'] = df_final['source'].apply(lambda x: 0 if 'ClinVar' in str(x) else 1)
    df_final = df_final.sort_values('priority')
    df_final = df_final.drop_duplicates(subset='mutation_key', keep='first')
    df_final = df_final.drop(columns=['priority', 'mutation_key'])

    print(f"\n ✓ Dataset final: {len(df_final)}")
else:
    df_final = pd.DataFrame()
    print(" Aucun dataset à fusionner")

# --- 6. Derived features and output ---------------------------------------
if len(df_final) > 0:

    df_final['n_terminal'] = df_final['position'] < 50
    df_final['cysteine_gained'] = df_final['mut_aa'] == 'C'
    df_final['cysteine_lost'] = df_final['wt_aa'] == 'C'
    df_final['mutation_id'] = df_final['wt_aa'] + (df_final['position'] + 1).astype(str) + df_final['mut_aa']

    # ROS axis: cysteine turnover, cysteine-rich proteins, or IMS location.
    df_final['ros_axis'] = (
        df_final['cysteine_lost'] |
        df_final['cysteine_gained'] |
        (df_final.get('cysteine_fraction', 0) > 0.03) |
        (df_final.get('mito_region', '') == 'IMS')
    )

    df_final['import_axis'] = df_final['n_terminal']

    df_final.to_parquet(PATHS['data_processed'] / 'mutations_dataset_final.parquet')
    df_final.to_csv(PATHS['data_processed'] / 'mutations_dataset_final.tsv', sep='\t', index=False)

if len(df_clinvar_parsed) > 0:
    df_clinvar_parsed.to_parquet(PATHS['data_raw'] / 'clinvar_mito_parsed.parquet')
scripts/build_clinvar_uniprot_dataset.py.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
"""Build a ClinVar dataset validated against a UniProt human-reviewed TSV.

Indexes UniProt sequences by accession and gene symbol, loads (or parses)
mitochondrial ClinVar variants, validates each variant position against the
matching sequence, derives a few axis features and writes the final dataset.

Originally generated by Colab:
https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""

import gzip
import os
import re
from pathlib import Path

import numpy as np
import pandas as pd
from tqdm import tqdm

# FIX: BASE_PATH was referenced below but never defined (NameError).
BASE_PATH = Path(os.environ.get('MITO_BASE_PATH', '.'))

PATHS = {
    'data_raw': BASE_PATH / 'data' / 'raw',
    'data_processed': BASE_PATH / 'data' / 'processed',
}

for path in PATHS.values():
    path.mkdir(parents=True, exist_ok=True)

# Three-letter -> one-letter codes for the 20 standard amino acids.
AA_3TO1 = {
    'Ala': 'A', 'Arg': 'R', 'Asn': 'N', 'Asp': 'D', 'Cys': 'C',
    'Glu': 'E', 'Gln': 'Q', 'Gly': 'G', 'His': 'H', 'Ile': 'I',
    'Leu': 'L', 'Lys': 'K', 'Met': 'M', 'Phe': 'F', 'Pro': 'P',
    'Ser': 'S', 'Thr': 'T', 'Trp': 'W', 'Tyr': 'Y', 'Val': 'V'
}

# --- 1. Index UniProt sequences -------------------------------------------
# FIX: uniprot_file was used below but never defined (NameError). Assumed to
# be a gzipped UniProt TSV export with 'Entry', 'Gene Names', 'Sequence' and
# 'Protein names' columns — TODO confirm the actual path used in the project.
uniprot_file = PATHS['data_raw'] / 'uniprot_human_reviewed.tsv.gz'

print(f" Fichier: {uniprot_file.name}")

with gzip.open(uniprot_file, 'rt') as f:
    df_uniprot = pd.read_csv(f, sep='\t', low_memory=False)

print(f" Protéines : {len(df_uniprot):,}")
print(f" Colonnes: {df_uniprot.columns.tolist()}")

seq_dict = {}       # accession -> sequence
gene_to_acc = {}    # upper-cased gene symbol -> accession
acc_to_info = {}    # accession -> extra annotations

for _, row in tqdm(df_uniprot.iterrows(), total=len(df_uniprot), desc="Indexation"):
    acc = row['Entry']
    seq = row['Sequence']

    if pd.isna(seq) or not seq:
        continue

    seq_dict[acc] = seq
    acc_to_info[acc] = {
        'length': len(seq),
        'cysteine_count': seq.count('C'),
        'cysteine_fraction': seq.count('C') / len(seq) if seq else 0,
        'protein_name': str(row.get('Protein names', ''))[:50],
    }

    # UniProt lists several space-separated gene aliases; index them all,
    # plus a hyphen-less variant of each to absorb naming differences.
    gene_names = str(row.get('Gene Names', ''))
    if gene_names and gene_names != 'nan':
        for gene in gene_names.split():
            gene_upper = gene.upper().strip()
            if gene_upper:
                gene_to_acc[gene_upper] = acc
                gene_to_acc[gene_upper.replace('-', '')] = acc

print(f"\n Séquences indexées: {len(seq_dict):,}")
print(f" Gènes mappés: {len(gene_to_acc):,}")

df_uniprot_clean = df_uniprot[['Entry', 'Gene Names', 'Sequence', 'Protein names']].copy()
df_uniprot_clean.columns = ['accession', 'gene_names', 'sequence', 'protein_name']
df_uniprot_clean = df_uniprot_clean[df_uniprot_clean['sequence'].notna()]
df_uniprot_clean.to_parquet(PATHS['data_raw'] / 'uniprot_human_reviewed.parquet')

# --- 2. Load (or parse) mitochondrial ClinVar variants --------------------
clinvar_parsed_file = PATHS['data_raw'] / 'clinvar_mito_parsed.parquet'

if clinvar_parsed_file.exists():
    df_clinvar = pd.read_parquet(clinvar_parsed_file)
    print(f" ✓ ClinVar parsé chargé: {len(df_clinvar):,}")
else:
    print(" Parsing ClinVar depuis le fichier brut...")

    # NOTE(review): the original hard-coded Path("") here, which cannot be
    # opened. Made configurable; set CLINVAR_VARIANT_SUMMARY to the local
    # variant_summary.txt.gz path — TODO confirm the intended default.
    clinvar_file = Path(os.environ.get('CLINVAR_VARIANT_SUMMARY', ''))

    with gzip.open(clinvar_file, "rt") as f:
        df_raw = pd.read_csv(f, sep="\t", low_memory=False)

    # "athogenic|enign" matches Pathogenic/pathogenic and Benign/benign.
    df_filtered = df_raw[
        df_raw['GeneSymbol'].notna() &
        df_raw['ClinicalSignificance'].str.contains("athogenic|enign", case=False, na=False)
    ].copy()

    MITO_GENES = [
        'OPA1', 'MFN1', 'MFN2', 'DNM1L', 'AFG3L2', 'SPG7', 'LONP1', 'CLPP',
        'NDUFAF1', 'NDUFAF2', 'NDUFAF3', 'NDUFAF4', 'NDUFAF5', 'NDUFAF6',
        'NUBPL', 'ACAD9', 'TIMMDC1', 'FOXRED1',
        'NDUFS1', 'NDUFS2', 'NDUFS3', 'NDUFS4', 'NDUFS6', 'NDUFS7', 'NDUFS8',
        'NDUFV1', 'NDUFV2', 'NDUFA1', 'NDUFA2', 'NDUFA9', 'NDUFA10', 'NDUFA11', 'NDUFA12', 'NDUFA13',
        'SDHA', 'SDHB', 'SDHC', 'SDHD', 'SDHAF1', 'SDHAF2',
        'BCS1L', 'TTC19', 'UQCRB', 'UQCRQ', 'UQCRC2',
        'SURF1', 'SCO1', 'SCO2', 'COX10', 'COX14', 'COX15', 'COX20',
        'COA5', 'COA6', 'COA7', 'PET100',
        'COX4I1', 'COX6A1', 'COX6B1', 'COX7B', 'COX8A',
        'ATP5F1A', 'ATP5F1D', 'ATP5F1E', 'TMEM70',
        'TIMM50', 'TIMM8A', 'DNAJC19', 'AGK',
        'CHCHD2', 'CHCHD10', 'AIFM1',
        'HSPA9', 'HSPD1',
        'AARS2', 'DARS2', 'EARS2', 'FARS2', 'HARS2', 'IARS2', 'LARS2', 'MARS2',
        'NARS2', 'RARS2', 'SARS2', 'TARS2', 'VARS2', 'YARS2',
        'GFM1', 'TSFM', 'C12orf65',
        'POLG', 'POLG2', 'TWNK', 'TFAM', 'RRM2B', 'MPV17', 'DGUOK', 'TK2',
        'SUCLA2', 'SUCLG1', 'FBXL4',
        'PDHA1', 'PDHB', 'PDHX', 'DLD',
        'PC', 'PCCA', 'PCCB', 'MUT',
        'LIAS', 'LIPT1', 'BOLA3', 'NFU1', 'ISCA1', 'ISCA2', 'IBA57', 'GLRX5',
        'COQ2', 'COQ4', 'COQ6', 'COQ7', 'COQ8A', 'COQ9', 'PDSS1', 'PDSS2',
        'SLC25A4', 'SLC25A3', 'SLC25A12', 'SLC25A13',
        'TAZ', 'SERAC1',
        'LRPPRC', 'TACO1', 'ELAC2', 'TRNT1',
    ]

    MITO_PHENOTYPES = [
        'mitochondrial', 'Leigh', 'MELAS', 'MERRF', 'NARP', 'LHON',
        'optic atrophy', 'encephalopathy', 'cardiomyopathy', 'myopathy',
        'Complex I', 'Complex IV', 'OXPHOS', 'respiratory chain', 'lactic acidosis',
    ]

    mito_genes_upper = [g.upper() for g in MITO_GENES]
    mask_gene = df_filtered['GeneSymbol'].str.upper().isin(mito_genes_upper)

    phenotype_pattern = '|'.join(MITO_PHENOTYPES)
    mask_phenotype = df_filtered['PhenotypeList'].str.contains(phenotype_pattern, case=False, na=False)

    df_mito = df_filtered[mask_gene | mask_phenotype].copy()

    records = []
    for _, row in tqdm(df_mito.iterrows(), total=len(df_mito), desc="Parsing"):
        name = str(row.get('Name', ''))

        wt, pos, mut = None, None, None

        # Try the 3-letter HGVS form first (p.Arg123Cys), then 1-letter.
        match = re.search(r'p\.([A-Z][a-z]{2})(\d+)([A-Z][a-z]{2})', name)
        if match:
            wt_3, pos_str, mut_3 = match.groups()
            wt = AA_3TO1.get(wt_3)
            mut = AA_3TO1.get(mut_3)
            pos = int(pos_str)

        if not wt:
            match = re.search(r'p\.([A-Z])(\d+)([A-Z])', name)
            if match:
                wt, pos_str, mut = match.groups()
                pos = int(pos_str)

        if not (wt and mut and pos):
            continue

        if wt not in 'ACDEFGHIKLMNPQRSTVWY' or mut not in 'ACDEFGHIKLMNPQRSTVWY':
            continue

        clin_sig = str(row.get('ClinicalSignificance', '')).lower()

        # Keep only unambiguous, non-conflicting calls.
        if 'pathogenic' in clin_sig and 'benign' not in clin_sig and 'conflicting' not in clin_sig:
            label = 1
        elif 'benign' in clin_sig and 'pathogenic' not in clin_sig and 'conflicting' not in clin_sig:
            label = 0
        else:
            continue

        records.append({
            'gene_symbol': str(row['GeneSymbol']).upper(),
            'position': pos - 1,  # ClinVar is 1-based; store 0-based
            'wt_aa': wt,
            'mut_aa': mut,
            'label': label,
            'source': 'ClinVar',
            'clinical_significance': row.get('ClinicalSignificance', ''),
            'review_status': str(row.get('ReviewStatus', '')),
        })

    df_clinvar = pd.DataFrame(records)
    df_clinvar['mutation_key'] = df_clinvar['gene_symbol'] + '_' + df_clinvar['position'].astype(str) + '_' + df_clinvar['mut_aa']
    df_clinvar = df_clinvar.drop_duplicates(subset='mutation_key', keep='first')

    df_clinvar.to_parquet(clinvar_parsed_file)

    print(df_clinvar['label'].value_counts())

    print(df_clinvar['gene_symbol'].value_counts().head(15))

# --- 3. Gene coverage diagnostics -----------------------------------------
genes_clinvar = set(df_clinvar['gene_symbol'].unique())
genes_mapped = set(gene_to_acc.keys())
genes_found = genes_clinvar & genes_mapped
genes_missing = genes_clinvar - genes_mapped

print(f" ClinVar: {len(genes_clinvar)}")
print(f" trouvés: {len(genes_found)} ({100*len(genes_found)/len(genes_clinvar):.1f}%)")
print(f" manquants: {len(genes_missing)}")

if genes_missing and len(genes_missing) <= 20:
    print(f" Manquants: {genes_missing}")

# --- 4. Validate variants against the sequences ---------------------------
validated = []
stats = {'found': 0, 'not_found': 0, 'mismatch': 0}

for _, row in tqdm(df_clinvar.iterrows(), total=len(df_clinvar), desc="Validation"):
    gene = row['gene_symbol']

    acc = gene_to_acc.get(gene)
    if not acc:
        # Fallback aliases: strip hyphens / take the first alias token.
        for variant in [gene.replace('-', ''), gene.split('-')[0], gene.split(';')[0]]:
            variant = variant.upper()
            if variant in gene_to_acc:
                acc = gene_to_acc[variant]
                break

    if not acc:
        stats['not_found'] += 1
        continue

    seq = seq_dict.get(acc, '')
    if not seq:
        stats['not_found'] += 1
        continue

    pos = row['position']
    wt = row['wt_aa']
    mut = row['mut_aa']

    # Accept only variants whose wild-type residue matches the sequence.
    if 0 <= pos < len(seq) and seq[pos] == wt:
        info = acc_to_info.get(acc, {})

        validated.append({
            'uniprot_acc': acc,
            'gene_symbol': gene,
            'position': pos,
            'wt_aa': wt,
            'mut_aa': mut,
            'label': row['label'],
            'source': 'ClinVar',
            'review_status': row.get('review_status', ''),
            'cysteine_fraction': info.get('cysteine_fraction', 0),
            'protein_name': info.get('protein_name', ''),
        })
        stats['found'] += 1
    else:
        stats['mismatch'] += 1

# FIX: df_validated was referenced below but never built from the
# ``validated`` list, so the script crashed with NameError here.
df_validated = pd.DataFrame(validated)

# --- 5. Derived features and output ---------------------------------------
if len(df_validated) > 0:
    df_validated['mutation_id'] = df_validated['wt_aa'] + (df_validated['position'] + 1).astype(str) + df_validated['mut_aa']
    df_validated['n_terminal'] = df_validated['position'] < 50
    df_validated['cysteine_gained'] = df_validated['mut_aa'] == 'C'
    df_validated['cysteine_lost'] = df_validated['wt_aa'] == 'C'
    df_validated['ros_axis'] = df_validated['cysteine_lost'] | df_validated['cysteine_gained'] | (df_validated['cysteine_fraction'] > 0.03)
    df_validated['import_axis'] = df_validated['n_terminal']

    # Deduplicate on (accession, position, mutant).
    df_validated['mutation_key'] = df_validated['uniprot_acc'] + '_' + df_validated['position'].astype(str) + '_' + df_validated['mut_aa']
    df_validated = df_validated.drop_duplicates(subset='mutation_key', keep='first')

    df_validated.to_parquet(PATHS['data_processed'] / 'mutations_dataset_final.parquet')
    df_validated.to_csv(PATHS['data_processed'] / 'mutations_dataset_final.tsv', sep='\t', index=False)
scripts/build_mito_clinvar_dataset.py.py ADDED
@@ -0,0 +1,377 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Untitled17.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
8
+ """
9
+
10
+ import pandas as pd
11
+ import numpy as np
12
+ import gzip
13
+ import re
14
+ from pathlib import Path
15
+ from tqdm import tqdm
16
+
17
+
18
+
19
+ PATHS = {
20
+ 'data_raw': BASE_PATH / 'data' / 'raw',
21
+ 'data_processed': BASE_PATH / 'data' / 'processed',
22
+ }
23
+
24
+ for path in PATHS.values():
25
+ path.mkdir(parents=True, exist_ok=True)
26
+
27
+ AA_3TO1 = {
28
+ 'Ala': 'A', 'Arg': 'R', 'Asn': 'N', 'Asp': 'D', 'Cys': 'C',
29
+ 'Glu': 'E', 'Gln': 'Q', 'Gly': 'G', 'His': 'H', 'Ile': 'I',
30
+ 'Leu': 'L', 'Lys': 'K', 'Met': 'M', 'Phe': 'F', 'Pro': 'P',
31
+ 'Ser': 'S', 'Thr': 'T', 'Trp': 'W', 'Tyr': 'Y', 'Val': 'V'
32
+ }
33
+
34
+
35
+ print(f" Fichier: {clinvar_file}")
36
+ print(f" Taille: {clinvar_file.stat().st_size / 1e6:.1f} MB")
37
+
38
+ with gzip.open(clinvar_file, "rt") as f:
39
+ df_raw = pd.read_csv(f, sep="\t", low_memory=False)
40
+
41
+ print(f" ✓ Lignes : {len(df_raw):,}")
42
+ print(f" ✓ Colonnes: {len(df_raw.columns)}")
43
+
44
+ for i, col in enumerate(df_raw.columns):
45
+ print(f" {i}: {col}")
46
+
47
+
48
+
49
+
50
+ gene_col = 'GeneSymbol' if 'GeneSymbol' in df_raw.columns else 'Gene'
51
+ name_col = 'Name' if 'Name' in df_raw.columns else None
52
+ clin_sig_col = 'ClinicalSignificance' if 'ClinicalSignificance' in df_raw.columns else 'ClinSig'
53
+ phenotype_col = 'PhenotypeList' if 'PhenotypeList' in df_raw.columns else None
54
+
55
+ print(f" gène: {gene_col}")
56
+ print(f" nom: {name_col}")
57
+ print(f" signification: {clin_sig_col}")
58
+ print(f" phénotype: {phenotype_col}")
59
+
60
+ df_filtered = df_raw[
61
+ df_raw[gene_col].notna() &
62
+ df_raw[clin_sig_col].str.contains("athogenic|enign", case=False, na=False)
63
+ ].copy()
64
+
65
+
66
+ MITO_GENES = [
67
+ 'OPA1', 'MFN1', 'MFN2', 'DNM1L', 'FIS1',
68
+ 'AFG3L2', 'SPG7', 'OMA1', 'YME1L1', 'LONP1', 'CLPP', 'HTRA2',
69
+ 'NDUFAF1', 'NDUFAF2', 'NDUFAF3', 'NDUFAF4', 'NDUFAF5', 'NDUFAF6', 'NDUFAF7', 'NDUFAF8',
70
+ 'NUBPL', 'ACAD9', 'TIMMDC1', 'FOXRED1', 'ECSIT',
71
+ 'NDUFS1', 'NDUFS2', 'NDUFS3', 'NDUFS4', 'NDUFS6', 'NDUFS7', 'NDUFS8',
72
+ 'NDUFV1', 'NDUFV2', 'NDUFA1', 'NDUFA2', 'NDUFA9', 'NDUFA10', 'NDUFA11', 'NDUFA12', 'NDUFA13',
73
+ 'NDUFB3', 'NDUFB8', 'NDUFB9', 'NDUFB10', 'NDUFB11',
74
+ 'SDHA', 'SDHB', 'SDHC', 'SDHD', 'SDHAF1', 'SDHAF2',
75
+ 'BCS1L', 'TTC19', 'UQCRB', 'UQCRQ', 'UQCRC2', 'UQCRFS1', 'CYC1',
76
+ 'SURF1', 'SCO1', 'SCO2', 'COX10', 'COX14', 'COX15', 'COX20',
77
+ 'COA5', 'COA6', 'COA7', 'COA8', 'PET100', 'PET117',
78
+ 'COX4I1', 'COX4I2', 'COX5A', 'COX6A1', 'COX6A2', 'COX6B1', 'COX6C', 'COX7A1', 'COX7B', 'COX8A',
79
+ 'ATP5F1A', 'ATP5F1B', 'ATP5F1C', 'ATP5F1D', 'ATP5F1E',
80
+ 'ATP5MC1', 'ATP5MC2', 'ATP5MC3', 'ATP5MG', 'ATP5PB', 'ATP5PD', 'ATP5PF',
81
+ 'TMEM70', 'ATPAF2',
82
+ 'TIMM50', 'TIMM44', 'TIMM23', 'TIMM22', 'TIMM8A', 'TIMM8B', 'TIMM13',
83
+ 'TOMM20', 'TOMM22', 'TOMM40', 'TOMM70',
84
+ 'DNAJC19', 'PAM16', 'MAGMAS', 'AGK',
85
+ 'CHCHD2', 'CHCHD10', 'CHCHD4', 'AIFM1', 'GFER',
86
+ 'COX17', 'COX19', 'SCO1', 'SCO2',
87
+ 'HSPA9', 'HSPD1', 'HSPE1', 'CLPB',
88
+ 'AARS2', 'CARS2', 'DARS2', 'EARS2', 'FARS2', 'GARS1', 'HARS2', 'IARS2',
89
+ 'KARS1', 'LARS2', 'MARS2', 'NARS2', 'PARS2', 'QARS1', 'RARS2', 'SARS2',
90
+ 'TARS2', 'VARS2', 'WARS2', 'YARS2',
91
+ 'GFM1', 'GFM2', 'TSFM', 'TUFM', 'MRPS16', 'MRPS22', 'MRPL3', 'MRPL12', 'MRPL44',
92
+ 'C12orf65', 'RMND1', 'GTPBP3', 'MTO1', 'TRMU',
93
+ 'POLG', 'POLG2', 'TWNK', 'TFAM', 'TFB1M', 'TFB2M', 'TEFM',
94
+ 'RRM2B', 'MPV17', 'DGUOK', 'TK2', 'SUCLA2', 'SUCLG1', 'ABAT',
95
+ 'PDHA1', 'PDHB', 'PDHX', 'DLD', 'DLAT',
96
+ 'BCKDHA', 'BCKDHB', 'DBT',
97
+ 'PC', 'PCCA', 'PCCB', 'MUT', 'MMAA', 'MMAB', 'MMACHC', 'MMADHC',
98
+ 'LIAS', 'LIPT1', 'LIPT2', 'BOLA3', 'NFU1', 'ISCA1', 'ISCA2', 'IBA57',
99
+ 'GLRX5', 'FDXR', 'FDX1', 'FDX2',
100
+ 'COQ2', 'COQ4', 'COQ5', 'COQ6', 'COQ7', 'COQ8A', 'COQ8B', 'COQ9', 'PDSS1', 'PDSS2',
101
+ 'FBXL4', 'SLC25A4', 'SLC25A3', 'SLC25A12', 'SLC25A13', 'SLC25A19', 'SLC25A22',
102
+ 'SERAC1', 'TAZ', 'DCAKD',
103
+ 'LRPPRC', 'TACO1', 'MTFMT', 'ELAC2', 'TRNT1',
104
+ 'PNPT1', 'PUS1', 'FASTKD2',
105
+ 'IDH2', 'IDH3A', 'IDH3B', 'IDH3G',
106
+ ]
107
+
108
+ MITO_PHENOTYPES = [
109
+ 'mitochondrial', 'Leigh', 'MELAS', 'MERRF', 'NARP', 'LHON',
110
+ 'Kearns-Sayre', 'CPEO', 'optic atrophy', 'encephalopathy',
111
+ 'cardiomyopathy', 'myopathy', 'Complex I', 'Complex II',
112
+ 'Complex III', 'Complex IV', 'Complex V', 'OXPHOS',
113
+ 'respiratory chain', 'lactic acidosis', 'Alpers',
114
+ 'Pearson', 'Barth', 'Sengers', 'aminoacyl-tRNA',
115
+ ]
116
+
117
+ mito_genes_upper = [g.upper() for g in MITO_GENES]
118
+
119
+ mask_gene = df_filtered[gene_col].str.upper().isin(mito_genes_upper)
120
+
121
+ mask_phenotype = pd.Series(False, index=df_filtered.index)
122
+ if phenotype_col and phenotype_col in df_filtered.columns:
123
+ phenotype_pattern = '|'.join(MITO_PHENOTYPES)
124
+ mask_phenotype = df_filtered[phenotype_col].str.contains(phenotype_pattern, case=False, na=False)
125
+
126
+ df_mito = df_filtered[mask_gene | mask_phenotype].copy()
127
+
128
+ print(f"\n {mask_gene.sum():,}")
129
+ print(f" {mask_phenotype.sum():,}")
130
+ print(f" Total (union): {len(df_mito):,}")
131
+
132
+
133
+
134
+ records = []
135
+
136
+ for _, row in tqdm(df_mito.iterrows(), total=len(df_mito), desc="Parsing"):
137
+ name = str(row.get('Name', ''))
138
+
139
+ wt, pos, mut = None, None, None
140
+
141
+ match = re.search(r'p\.([A-Z][a-z]{2})(\d+)([A-Z][a-z]{2})', name)
142
+ if match:
143
+ wt_3, pos_str, mut_3 = match.groups()
144
+ wt = AA_3TO1.get(wt_3)
145
+ mut = AA_3TO1.get(mut_3)
146
+ pos = int(pos_str)
147
+
148
+ if not wt:
149
+ match = re.search(r'p\.([A-Z])(\d+)([A-Z])', name)
150
+ if match:
151
+ wt, pos_str, mut = match.groups()
152
+ pos = int(pos_str)
153
+
154
+ if not wt:
155
+ match = re.search(r'\(p\.([A-Z][a-z]{2})(\d+)([A-Z][a-z]{2})\)', name)
156
+ if match:
157
+ wt_3, pos_str, mut_3 = match.groups()
158
+ wt = AA_3TO1.get(wt_3)
159
+ mut = AA_3TO1.get(mut_3)
160
+ pos = int(pos_str)
161
+
162
+ if not (wt and mut and pos):
163
+ continue
164
+
165
+ if wt not in 'ACDEFGHIKLMNPQRSTVWY' or mut not in 'ACDEFGHIKLMNPQRSTVWY':
166
+ continue
167
+
168
+ clin_sig = str(row.get(clin_sig_col, '')).lower()
169
+
170
+ if 'pathogenic' in clin_sig and 'benign' not in clin_sig and 'conflicting' not in clin_sig:
171
+ label = 1
172
+ elif 'benign' in clin_sig and 'pathogenic' not in clin_sig and 'conflicting' not in clin_sig:
173
+ label = 0
174
+ else:
175
+ continue
176
+
177
+ records.append({
178
+ 'gene_symbol': str(row[gene_col]).upper(),
179
+ 'position': pos - 1,
180
+ 'wt_aa': wt,
181
+ 'mut_aa': mut,
182
+ 'label': label,
183
+ 'source': 'ClinVar',
184
+ 'clinical_significance': row.get(clin_sig_col, ''),
185
+ 'review_status': str(row.get('ReviewStatus', '')),
186
+ 'phenotype': str(row.get(phenotype_col, ''))[:100] if phenotype_col else '',
187
+ })
188
+
189
+ df_parsed = pd.DataFrame(records)
190
+
191
+
192
+ if len(df_parsed) > 0:
193
+
194
+ print(df_parsed['label'].value_counts())
195
+
196
+
197
+ print(df_parsed['gene_symbol'].value_counts().head(15))
198
+
199
+ df_parsed['mutation_key'] = df_parsed['gene_symbol'] + '_' + df_parsed['position'].astype(str) + '_' + df_parsed['mut_aa']
200
+ df_parsed = df_parsed.drop_duplicates(subset='mutation_key', keep='first')
201
+ print(f"\n Après dédoublonnage: {len(df_parsed):,}")
202
+
203
+ uniprot_file = PATHS['data_raw'] / 'uniprot_mito_extended.parquet'
204
+ proteins_file = PATHS['data_processed'] / 'proteins_targeted.parquet'
205
+
206
+ seq_dict = {}
207
+ gene_to_acc = {}
208
+ acc_to_info = {}
209
+
210
+ if uniprot_file.exists():
211
+ df_uniprot = pd.read_parquet(uniprot_file)
212
+ for _, row in df_uniprot.iterrows():
213
+ acc = row['accession']
214
+ seq = row['sequence']
215
+ gene = str(row['gene_name']).upper() if pd.notna(row['gene_name']) else ''
216
+
217
+ seq_dict[acc] = seq
218
+ acc_to_info[acc] = {
219
+ 'cysteine_fraction': row.get('cysteine_fraction', seq.count('C')/len(seq) if seq else 0),
220
+ 'mito_region': row.get('mito_region', 'Unknown'),
221
+ }
222
+
223
+ if gene:
224
+ gene_to_acc[gene] = acc
225
+ gene_to_acc[gene.replace('-', '')] = acc
226
+
227
+ print(f" Protéines UniProt: {len(df_uniprot):,}")
228
+
229
+ if proteins_file.exists():
230
+ df_proteins = pd.read_parquet(proteins_file)
231
+ for _, row in df_proteins.iterrows():
232
+ acc = row['accession']
233
+ seq = row['sequence']
234
+ gene = row['gene_symbol'].upper()
235
+
236
+ if acc not in seq_dict:
237
+ seq_dict[acc] = seq
238
+ gene_to_acc[gene] = acc
239
+
240
+ print(f" Protéines : {len(df_proteins)}")
241
+
242
+ print(f" séquences: {len(seq_dict):,}")
243
+ print(f" mappés: {len(gene_to_acc):,}")
244
+
245
+ if len(df_parsed) > 0:
246
+ genes_clinvar = set(df_parsed['gene_symbol'].unique())
247
+ genes_mapped = set(gene_to_acc.keys())
248
+ genes_found = genes_clinvar & genes_mapped
249
+ genes_missing = genes_clinvar - genes_mapped
250
+
251
+ print(f"\n Gènes : {len(genes_clinvar)}")
252
+ print(f" trouvés : {len(genes_found)}")
253
+ print(f" manquants: {len(genes_missing)}")
254
+
255
+ if genes_missing:
256
+ print(f" Exemples manquants: {list(genes_missing)[:10]}")
257
+
258
+
259
+
260
+ validated = []
261
+ stats = {'not_found': 0, 'seq_mismatch': 0, 'valid': 0}
262
+
263
+ for _, row in tqdm(df_parsed.iterrows(), total=len(df_parsed), desc="Validation"):
264
+ gene = row['gene_symbol']
265
+
266
+ acc = gene_to_acc.get(gene)
267
+ if not acc:
268
+ for variant in [gene.replace('-', ''), gene.split('-')[0]]:
269
+ if variant in gene_to_acc:
270
+ acc = gene_to_acc[variant]
271
+ break
272
+
273
+ if not acc:
274
+ stats['not_found'] += 1
275
+ continue
276
+
277
+ seq = seq_dict.get(acc, '')
278
+ if not seq:
279
+ stats['not_found'] += 1
280
+ continue
281
+
282
+ pos = row['position']
283
+ wt = row['wt_aa']
284
+ mut = row['mut_aa']
285
+
286
+ if 0 <= pos < len(seq):
287
+ if seq[pos] == wt:
288
+ info = acc_to_info.get(acc, {})
289
+
290
+ validated.append({
291
+ 'uniprot_acc': acc,
292
+ 'gene_symbol': gene,
293
+ 'position': pos,
294
+ 'wt_aa': wt,
295
+ 'mut_aa': mut,
296
+ 'label': row['label'],
297
+ 'source': 'ClinVar',
298
+ 'review_status': row.get('review_status', ''),
299
+ 'clinical_significance': row.get('clinical_significance', ''),
300
+ 'phenotype': row.get('phenotype', ''),
301
+ 'cysteine_fraction': info.get('cysteine_fraction', 0),
302
+ 'mito_region': info.get('mito_region', 'Unknown'),
303
+ })
304
+ stats['valid'] += 1
305
+ else:
306
+ stats['seq_mismatch'] += 1
307
+ else:
308
+ stats['seq_mismatch'] += 1
309
+
310
+ df_validated = pd.DataFrame(validated)
311
+
312
+
313
+ if len(df_validated) > 0:
314
+ print(f"\n Labels validés:")
315
+ print(df_validated['label'].value_counts())
316
+
317
+
318
+
319
+ benign_file = PATHS['data_processed'] / 'mutations_master.parquet'
320
+
321
+ if benign_file.exists():
322
+ df_benign_existing = pd.read_parquet(benign_file)
323
+ df_benign_existing = df_benign_existing.copy()
324
+ df_benign_existing['label'] = 0
325
+ df_benign_existing['source'] = 'gnomAD_UniProt'
326
+ print(f" Socle bénin existant: {len(df_benign_existing)}")
327
+ else:
328
+ df_benign_existing = pd.DataFrame()
329
+ print(" Pas de socle bénin existant")
330
+
331
+
332
+
333
+ datasets = []
334
+
335
+ if len(df_validated) > 0:
336
+ datasets.append(df_validated)
337
+ print(f" + ClinVar validé: {len(df_validated)}")
338
+
339
+ if len(df_benign_existing) > 0:
340
+ cols_needed = ['uniprot_acc', 'position', 'wt_aa', 'mut_aa', 'label', 'source']
341
+ cols_available = [c for c in cols_needed if c in df_benign_existing.columns]
342
+
343
+ df_benign_clean = df_benign_existing[cols_available].copy()
344
+
345
+ if 'gene_symbol' in df_benign_existing.columns:
346
+ df_benign_clean['gene_symbol'] = df_benign_existing['gene_symbol']
347
+
348
+ datasets.append(df_benign_clean)
349
+ print(f" + Bénins existants: {len(df_benign_clean)}")
350
+
351
+ if datasets:
352
+ df_final = pd.concat(datasets, ignore_index=True)
353
+
354
+ df_final['mutation_key'] = (
355
+ df_final['uniprot_acc'].astype(str) + '_' +
356
+ df_final['position'].astype(str) + '_' +
357
+ df_final['mut_aa'].astype(str)
358
+ )
359
+ df_final['priority'] = df_final['source'].apply(lambda x: 0 if 'ClinVar' in str(x) else 1)
360
+ df_final = df_final.sort_values('priority')
361
+ df_final = df_final.drop_duplicates(subset='mutation_key', keep='first')
362
+ df_final = df_final.drop(columns=['priority', 'mutation_key'])
363
+
364
+ print(f"\n ✓ Dataset final: {len(df_final):,}")
365
+ else:
366
+ df_final = pd.DataFrame()
367
+
368
+
369
+
370
+ if 'gene_symbol' in df_final.columns:
371
+ patho_by_gene = df_final[df_final['label']==1]['gene_symbol'].value_counts().head(20)
372
+ print(patho_by_gene)
373
+
374
+ df_final.to_parquet(PATHS['data_processed'] / 'mutations_dataset_final.parquet')
375
+ df_final.to_csv(PATHS['data_processed'] / 'mutations_dataset_final.tsv', sep='\t', index=False)
376
+
377
+ df_parsed.to_parquet(PATHS['data_raw'] / 'clinvar_mito_parsed.parquet')
scripts/build_mutation_dataset.py.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Untitled17.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
8
+ """
9
+
10
+ import pandas as pd
11
+ import numpy as np
12
+ from ast import literal_eval
13
+ import re
14
+
15
+ df_disprot = pd.read_parquet(PATHS['disprot'] / 'disprot_data.parquet')
16
+ df_uniprot = pd.read_parquet(PATHS['uniprot'] / 'uniprot_mitochondrial.parquet')
17
+ df_clinvar = pd.read_parquet(PATHS['clinvar'] / 'clinvar_variants.parquet')
18
+ df_mobidb = pd.read_parquet(PATHS['mobidb'] / 'mobidb_data.parquet')
19
+
20
+ print(f" DisProt: {len(df_disprot)} ")
21
+ print(f" UniProt: {len(df_uniprot)} ")
22
+ print(f" ClinVar: {len(df_clinvar)} ")
23
+ print(f" MobiDB: {len(df_mobidb)} ")
24
+
25
+
26
+ mito_accs = set(df_uniprot['uniprot_acc'].unique())
27
+
28
+ df_disprot_mito = df_disprot[df_disprot['uniprot_acc'].isin(mito_accs)].copy()
29
+ print(f" ✓ DisProt : {len(df_disprot_mito)} régions ({df_disprot_mito['uniprot_acc'].nunique()} protéines)")
30
+
31
+ idp_mito_accs = set(df_disprot_mito['uniprot_acc'].unique())
32
+
33
+
34
+
35
+
36
+
37
+
38
+
39
def parse_protein_change(change_str: str) -> "dict | None":
    """Parse a three-letter protein change string (e.g. 'Ala123Val').

    Parameters
    ----------
    change_str : HGVS-style change in the form 'WtPosMut' with three-letter
        amino-acid codes.

    Returns
    -------
    dict with keys 'position' (0-based int), 'wt_aa' and 'mut_aa'
    (one-letter codes), or None when the input is missing, malformed,
    a stop-gain ('Ter') change, or uses an unknown residue code.
    (Fixed annotation: the original declared ``-> dict`` but returns
    None on every failure path.)
    """
    if not change_str or pd.isna(change_str):
        return None

    # Three-letter -> one-letter codes; 'Ter' (stop) is mapped so it is
    # recognised, but stop-gain substitutions are rejected below.
    aa_map = {
        'Ala': 'A', 'Arg': 'R', 'Asn': 'N', 'Asp': 'D', 'Cys': 'C',
        'Gln': 'Q', 'Glu': 'E', 'Gly': 'G', 'His': 'H', 'Ile': 'I',
        'Leu': 'L', 'Lys': 'K', 'Met': 'M', 'Phe': 'F', 'Pro': 'P',
        'Ser': 'S', 'Thr': 'T', 'Trp': 'W', 'Tyr': 'Y', 'Val': 'V',
        'Ter': '*'
    }

    match = re.match(r'([A-Z][a-z]{2})(\d+)([A-Z][a-z]{2})', change_str)
    if not match:
        return None

    wt_3, pos_str, mut_3 = match.groups()
    wt_1 = aa_map.get(wt_3, '?')
    mut_1 = aa_map.get(mut_3, '?')

    # Reject unknown residue codes and nonsense (stop-gain) substitutions.
    if wt_1 == '?' or mut_1 == '?' or mut_1 == '*':
        return None

    # ClinVar positions are 1-based; the dataset uses 0-based indexing.
    return {
        'position': int(pos_str) - 1,
        'wt_aa': wt_1,
        'mut_aa': mut_1,
    }
71
+
72
+ parsed_mutations = []
73
+ for idx, row in df_clinvar.iterrows():
74
+ parsed = parse_protein_change(row['protein_change'])
75
+ if parsed:
76
+ parsed['clinvar_id'] = row['clinvar_id']
77
+ parsed['gene'] = row['gene']
78
+ parsed['is_pathogenic'] = row['is_pathogenic']
79
+ parsed['is_benign'] = row['is_benign']
80
+ parsed['clinical_significance'] = row['clinical_significance']
81
+ parsed_mutations.append(parsed)
82
+
83
+ df_mutations = pd.DataFrame(parsed_mutations)
84
+ print(f" ✓ {len(df_mutations)} mutations parsées avec succès")
85
+
86
+ n_pathogenic = df_mutations['is_pathogenic'].sum()
87
+ n_benign = df_mutations['is_benign'].sum()
88
+ print(f" ✓ Pathognes {n_pathogenic}")
89
+ print(f" ✓ Bénin: {n_benign}")
90
+
91
+
92
+
93
+
94
+
95
+
96
+ gene_to_seq = {}
97
+ gene_to_acc = {}
98
+ for _, row in df_uniprot.iterrows():
99
+ gene = row['gene_name']
100
+ if gene and row['sequence']:
101
+ gene_to_seq[gene] = row['sequence']
102
+ gene_to_acc[gene] = row['uniprot_acc']
103
+
104
+ df_mutations['sequence'] = df_mutations['gene'].map(gene_to_seq)
105
+ df_mutations['uniprot_acc'] = df_mutations['gene'].map(gene_to_acc)
106
+
107
+ df_mutations_valid = df_mutations.dropna(subset=['sequence']).copy()
108
+
109
def validate_mutation(row):
    """Check that the reported wild-type residue matches the sequence.

    Returns True when row['position'] is a valid 0-based index into
    row['sequence'] and the residue found there equals row['wt_aa'];
    False otherwise (including out-of-range positions).
    """
    sequence, index = row['sequence'], row['position']
    in_bounds = 0 <= index < len(sequence)
    return in_bounds and sequence[index] == row['wt_aa']
119
+
120
+ df_mutations_valid['is_valid'] = df_mutations_valid.apply(validate_mutation, axis=1)
121
+ df_mutations_final = df_mutations_valid[df_mutations_valid['is_valid']].copy()
122
+
123
+ print(f" ✓ {len(df_mutations_final)} mutations validé")
124
+
125
+
126
+ df_pathogenic = df_mutations_final[df_mutations_final['is_pathogenic']].copy()
127
+ df_benign = df_mutations_final[df_mutations_final['is_benign']].copy()
128
+
129
+ print(f" Pathogènes : {len(df_pathogenic)}")
130
+ print(f" Bénins : {len(df_benign)}")
131
+
132
+ df_pathogenic['label'] = 1
133
+ df_benign['label'] = 0
134
+
135
+ df_dataset = pd.concat([df_pathogenic, df_benign], ignore_index=True)
136
+ df_dataset = df_dataset.sample(frac=1, random_state=42).reset_index(drop=True)
137
+
138
+ print(f" ✓ : {len(df_dataset)} mutations")
139
+
140
+
141
+
142
+ disorder_regions_by_acc = {}
143
+ for acc in df_disprot['uniprot_acc'].unique():
144
+ regions = df_disprot[df_disprot['uniprot_acc'] == acc][['region_start', 'region_end']].values.tolist()
145
+ disorder_regions_by_acc[acc] = regions
146
+
147
def is_in_disorder_region(row):
    """Classify a mutation against its protein's DisProt disorder regions.

    Returns True/False when the protein has DisProt annotations and the
    mutation falls inside/outside one of them, or None when the protein
    has no annotation at all (looked up in the module-level
    ``disorder_regions_by_acc`` mapping).

    Positions in the dataset are 0-based while DisProt regions are
    1-based, hence the +1 shift before comparing.
    """
    if row['uniprot_acc'] not in disorder_regions_by_acc:
        return None
    residue = row['position'] + 1  # convert to DisProt's 1-based coordinates
    return any(
        start <= residue <= end
        for start, end in disorder_regions_by_acc[row['uniprot_acc']]
    )
157
+
158
+ df_dataset['in_disorder_region'] = df_dataset.apply(is_in_disorder_region, axis=1)
159
+
160
+ n_in_disorder = df_dataset['in_disorder_region'].sum()
161
+ n_annotated = df_dataset['in_disorder_region'].notna().sum()
162
+ print(f" ✓ {n_in_disorder}/{n_annotated} ")
scripts/data_download.py ADDED
@@ -0,0 +1,344 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Untitled17.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
8
+ """
9
+
10
import gzip
import json
import pickle
import time
from io import StringIO, BytesIO
from pathlib import Path
from typing import List

import pandas as pd
import requests
from tqdm.auto import tqdm
18
+
19
class DisProtDownloader:
    """Client for the DisProt REST API.

    Downloads the current release and flattens each entry into one record
    per annotated disorder region, saved as a parquet file in ``save_dir``.
    """

    BASE_URL = "https://disprot.org/api"

    def __init__(self, save_dir: Path):
        # Make sure the output directory exists before any download runs.
        self.save_dir = save_dir
        self.save_dir.mkdir(parents=True, exist_ok=True)

    def download_all(self) -> pd.DataFrame:
        """Télécharger toutes les entrées DisProt"""
        print("\n📥 Téléchargement DisProt...")

        url = f"{self.BASE_URL}/search?release=current&show_ambiguous=false&format=json"

        try:
            response = requests.get(url, timeout=120)
            response.raise_for_status()
            payload = response.json()

            entries = payload.get('data', [])
            print(f" ✓ {len(entries)} entrées téléchargées")

            rows = []
            for entry in tqdm(entries, desc=" Parsing"):
                # Entry-level fields shared by every region record.
                shared = {
                    'disprot_id': entry.get('disprot_id', ''),
                    'uniprot_acc': entry.get('acc', ''),
                    'name': entry.get('name', ''),
                    'organism': entry.get('organism', ''),
                    'sequence': entry.get('sequence', ''),
                }
                # One output record per annotated disorder region.
                for region in entry.get('regions', []):
                    rows.append({
                        **shared,
                        'region_start': region.get('start', 0),
                        'region_end': region.get('end', 0),
                        'region_type': region.get('type', ''),
                        'term_name': region.get('term_name', ''),
                        'evidence': region.get('evidence', ''),
                    })

            df = pd.DataFrame(rows)

            save_path = self.save_dir / 'disprot_data.parquet'
            df.to_parquet(save_path)
            print(f" ✓ Sauvegardé: {save_path}")

            return df

        except Exception as e:
            print(f" ✗ Erreur DisProt: {e}")
            return pd.DataFrame()
76
+
77
+
78
+
79
+
80
class UniProtDownloader:
    """Client for the UniProtKB REST API (human mitochondrial proteins)."""

    BASE_URL = "https://rest.uniprot.org/uniprotkb"

    def __init__(self, save_dir: Path):
        self.save_dir = save_dir
        self.save_dir.mkdir(parents=True, exist_ok=True)

    def download_mitochondrial_human(self, max_results: int = 5000) -> pd.DataFrame:
        """Télécharger les protéines mitochondriales humaines

        Pages through UniProtKB search results (up to ``max_results``
        entries), flattens each entry and saves the result as
        ``uniprot_mitochondrial.parquet``. Returns an empty DataFrame on
        any request/parse error.
        """
        # organism 9606 = Homo sapiens; SL-0173 = mitochondrion.
        query = "(organism_id:9606) AND (cc_scl_term:SL-0173)"

        url = f"{self.BASE_URL}/search"
        params = {
            'query': query,
            'format': 'json',
            'size': min(500, max_results),
            'fields': 'accession,id,protein_name,gene_names,sequence,length,cc_subcellular_location,ft_domain,ft_region,organism_name'
        }

        all_results = []

        try:
            response = requests.get(url, params=params, timeout=120)
            response.raise_for_status()
            data = response.json()

            results = data.get('results', [])
            all_results.extend(results)
            print(f" ✓ {len(results)} ")

            # BUGFIX: UniProt returns the next-page URL in the HTTP `Link`
            # header (exposed by requests as ``response.links``), not in the
            # JSON body — the previous ``data.get('link')`` lookup was always
            # empty, so pagination never happened and results were silently
            # capped at the first page.
            next_link = response.links.get('next', {}).get('url')
            while next_link and len(all_results) < max_results:
                time.sleep(0.5)  # be polite to the API between pages
                response = requests.get(next_link, timeout=120)
                response.raise_for_status()
                data = response.json()
                all_results.extend(data.get('results', []))
                next_link = response.links.get('next', {}).get('url')
                print(f" ... {len(all_results)} protéines")

            records = [
                self._parse_entry(entry)
                for entry in tqdm(all_results[:max_results], desc=" Parsing")
            ]

            df = pd.DataFrame(records)

            save_path = self.save_dir / 'uniprot_mitochondrial.parquet'
            df.to_parquet(save_path)
            print(f" ✓ : {save_path}")

            return df

        except Exception as e:
            print(f" ✗ : {e}")
            return pd.DataFrame()

    @staticmethod
    def _parse_entry(entry: dict) -> dict:
        """Flatten one UniProtKB JSON entry into a single flat record."""
        seq_data = entry.get('sequence', {})

        # Recommended full protein name, when present.
        protein_name = ''
        if 'proteinDescription' in entry:
            rec_name = entry['proteinDescription'].get('recommendedName', {})
            protein_name = rec_name.get('fullName', {}).get('value', '')

        genes = entry.get('genes', [])
        gene_name = genes[0].get('geneName', {}).get('value', '') if genes else ''

        # Collect all annotated subcellular locations.
        locations = []
        for comment in entry.get('comments', []):
            if comment.get('commentType') == 'SUBCELLULAR LOCATION':
                for loc in comment.get('subcellularLocations', []):
                    loc_val = loc.get('location', {}).get('value', '')
                    if loc_val:
                        locations.append(loc_val)

        # Disorder / low-complexity spans from the sequence features.
        disorder_regions = []
        for feat in entry.get('features', []):
            if feat.get('type') in ['Region', 'Compositional bias']:
                desc = feat.get('description', '').lower()
                if 'disordered' in desc or 'low complexity' in desc:
                    loc = feat.get('location', {})
                    start = loc.get('start', {}).get('value', 0)
                    end = loc.get('end', {}).get('value', 0)
                    disorder_regions.append((start, end))

        return {
            'uniprot_acc': entry.get('primaryAccession', ''),
            'protein_name': protein_name,
            'gene_name': gene_name,
            'sequence': seq_data.get('value', ''),
            'length': seq_data.get('length', 0),
            'subcellular_locations': '|'.join(locations),
            'disorder_regions': str(disorder_regions),
            'is_mitochondrial': True
        }
181
+
182
+
183
class ClinVarDownloader:
    """Fetch pathogenic/benign SNVs for a list of genes via NCBI E-utilities."""

    def __init__(self, save_dir: Path):
        self.save_dir = save_dir
        self.save_dir.mkdir(parents=True, exist_ok=True)

    @staticmethod
    def _extract_protein_change(title: str) -> str:
        """Extract the protein change (e.g. 'Ala123Val') from a ClinVar title.

        ClinVar titles embed the change as '(p.XxxNNNYyy)'. Returns '' when
        no such annotation is present. BUGFIX: the previous inline code used
        ``title.find(')')`` without checking for -1, so a title missing the
        closing parenthesis silently yielded a truncated garbage string.
        """
        marker = title.find('(p.')
        if marker == -1:
            return ''
        start = marker + 3
        end = title.find(')', start)
        if end == -1:
            return ''
        return title[start:end]

    def download_variants_for_genes(self, gene_list: List[str], max_per_gene: int = 100) -> pd.DataFrame:
        """Query ClinVar for pathogenic/benign SNVs of each gene.

        NOTE(review): only the first 50 genes of ``gene_list`` are queried,
        matching the original implementation — raise the cap if needed.

        Returns a DataFrame (also saved to ``clinvar_variants.parquet``)
        with one row per variant: id, gene, title, parsed protein change
        and clinical-significance flags. Per-gene failures are logged and
        skipped.
        """
        base_url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils"

        all_variants = []

        for gene in tqdm(gene_list[:50], desc=" Gènes"):
            try:
                search_url = f"{base_url}/esearch.fcgi"
                search_params = {
                    'db': 'clinvar',
                    'term': f'{gene}[gene] AND ("pathogenic"[clinsig] OR "benign"[clinsig]) AND "single nucleotide variant"[vartype]',
                    'retmax': max_per_gene,
                    'retmode': 'json'
                }

                response = requests.get(search_url, params=search_params, timeout=30)
                response.raise_for_status()
                search_data = response.json()

                id_list = search_data.get('esearchresult', {}).get('idlist', [])

                if not id_list:
                    continue

                # NCBI rate limit: max 3 requests/second without an API key.
                time.sleep(0.34)

                fetch_url = f"{base_url}/esummary.fcgi"
                fetch_params = {
                    'db': 'clinvar',
                    'id': ','.join(id_list[:max_per_gene]),
                    'retmode': 'json'
                }

                response = requests.get(fetch_url, params=fetch_params, timeout=30)
                response.raise_for_status()
                fetch_data = response.json()

                results = fetch_data.get('result', {})

                for uid in id_list[:max_per_gene]:
                    # 'uids' is a bookkeeping key in esummary output, not a record.
                    if uid not in results or uid == 'uids':
                        continue

                    variant = results[uid]

                    title = variant.get('title', '')
                    clinical_sig = variant.get('clinical_significance', {}).get('description', '')

                    all_variants.append({
                        'clinvar_id': uid,
                        'gene': gene,
                        'title': title,
                        'protein_change': self._extract_protein_change(title),
                        'clinical_significance': clinical_sig,
                        'is_pathogenic': 'pathogenic' in clinical_sig.lower(),
                        'is_benign': 'benign' in clinical_sig.lower()
                    })

                time.sleep(0.34)

            except Exception as e:
                print(f" Error {gene}: {e}")
                continue

        df = pd.DataFrame(all_variants)

        if len(df) > 0:
            save_path = self.save_dir / 'clinvar_variants.parquet'
            df.to_parquet(save_path)
            print(f" ✓ {len(df)} {save_path}")
        else:
            print("None")

        return df
271
+
272
class MobiDBDownloader:
    """Fetch per-protein disorder consensus annotations from MobiDB."""

    BASE_URL = "https://mobidb.org/api/download"

    def __init__(self, save_dir: Path):
        self.save_dir = save_dir
        self.save_dir.mkdir(parents=True, exist_ok=True)

    def download_for_proteins(self, uniprot_accs: List[str]) -> pd.DataFrame:
        """Download MobiDB consensus data for up to 200 accessions.

        Returns a DataFrame (also saved to ``mobidb_data.parquet``) with one
        row per protein: disorder content, disorder regions and low-pLDDT
        regions. Non-200 responses and per-protein failures are skipped
        silently (best-effort enrichment).
        """
        records = []

        # Cap at 200 proteins, as in the original implementation.
        for acc in tqdm(uniprot_accs[:200], desc=" Protéines"):
            try:
                # CONSISTENCY FIX: reuse the class-level BASE_URL instead of
                # re-hardcoding the endpoint inside the loop.
                url = f"{self.BASE_URL}?acc={acc}&format=json"
                response = requests.get(url, timeout=30)

                if response.status_code != 200:
                    continue

                data = response.json()

                consensus = data.get('consensus', {})
                disorder_regions = consensus.get('disorder', {}).get('regions', [])
                plddt_regions = consensus.get('plddt', {}).get('regions', [])

                records.append({
                    'uniprot_acc': acc,
                    'disorder_content': data.get('disorder_content', 0),
                    'disorder_regions': str(disorder_regions),
                    'plddt_low_regions': str(plddt_regions),
                    'sequence_length': data.get('length', 0)
                })

                time.sleep(0.1)  # light rate-limiting between requests

            except Exception:
                # Best-effort: skip proteins whose download or parse fails.
                continue

        df = pd.DataFrame(records)

        if len(df) > 0:
            save_path = self.save_dir / 'mobidb_data.parquet'
            df.to_parquet(save_path)
            print(f" ✓ {len(df)} entrées sauvegardées: {save_path}")

        return df
321
+
322
+
323
+ disprot_downloader = DisProtDownloader(PATHS['disprot'])
324
+ df_disprot = disprot_downloader.download_all()
325
+
326
+ uniprot_downloader = UniProtDownloader(PATHS['uniprot'])
327
+ df_uniprot = uniprot_downloader.download_mitochondrial_human(max_results=2000)
328
+
329
+ if len(df_uniprot) > 0:
330
+ mito_genes = df_uniprot['gene_name'].dropna().unique().tolist()
331
+ mito_genes = [g for g in mito_genes if g]
332
+
333
+ clinvar_downloader = ClinVarDownloader(PATHS['clinvar'])
334
+ df_clinvar = clinvar_downloader.download_variants_for_genes(mito_genes[:100])
335
+ else:
336
+ df_clinvar = pd.DataFrame()
337
+
338
+ if len(df_uniprot) > 0:
339
+ mito_accs = df_uniprot['uniprot_acc'].tolist()
340
+
341
+ mobidb_downloader = MobiDBDownloader(PATHS['mobidb'])
342
+ df_mobidb = mobidb_downloader.download_for_proteins(mito_accs[:200])
343
+ else:
344
+ df_mobidb = pd.DataFrame()
scripts/esm2_t33_650M_UR50D.py ADDED
@@ -0,0 +1,278 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
"""Untitled17.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""

import torch
import pandas as pd
import numpy as np
from pathlib import Path
from tqdm import tqdm
import gc


# NOTE(review): BASE_PATH is not defined anywhere in this script — it was
# presumably set in an earlier Colab cell (e.g. a Drive mount). Confirm and
# define it before running standalone.
PATHS = {
    'data_frozen': BASE_PATH / 'data' / 'frozen',
    'data_raw': BASE_PATH / 'data' / 'raw',
    'features': BASE_PATH / 'features',
    'embeddings': BASE_PATH / 'embeddings',
}

PATHS['embeddings'].mkdir(parents=True, exist_ok=True)

# Check GPU availability; fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f" Device: {device}")
if torch.cuda.is_available():
    print(f" GPU: {torch.cuda.get_device_name(0)}")
    print(f" Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")


# Load the ESM-2 650M model, installing fair-esm on the fly if missing
# (Colab-style bootstrap; the re-import after pip install may require a
# fresh interpreter in some environments — verify).
try:
    import esm
    model_name = "esm2_t33_650M_UR50D"
    model, alphabet = esm.pretrained.esm2_t33_650M_UR50D()
except ImportError:
    import subprocess
    subprocess.run(['pip', 'install', 'fair-esm'], check=True)
    import esm
    model, alphabet = esm.pretrained.esm2_t33_650M_UR50D()

model = model.to(device)
model.eval()

batch_converter = alphabet.get_batch_converter()

# Width of the per-token representations; used to size embedding matrices.
embedding_dim = model.embed_dim

# Mutation feature tables (full and strict mitochondrial subsets).
df_full = pd.read_parquet(PATHS['features'] / 'features_classical_full.parquet')
df_strict = pd.read_parquet(PATHS['features'] / 'features_classical_mito_strict.parquet')

# accession -> amino-acid sequence lookup.
df_uniprot = pd.read_parquet(PATHS['data_raw'] / 'uniprot_human_reviewed.parquet')
seq_dict = dict(zip(df_uniprot['accession'], df_uniprot['sequence']))

print(f" full: {len(df_full):,}")
print(f" strict: {len(df_strict):,}")
print(f" Séquences: {len(seq_dict):,}")
61
+
62
+
63
def extract_esm_embeddings(sequences_dict, model, alphabet, batch_converter,
                           device, window=25, batch_size=4, max_length=1022):
    """Compute one global (mean-pooled) ESM-2 embedding per protein.

    Parameters
    ----------
    sequences_dict : dict mapping accession -> amino-acid sequence.
    model, alphabet, batch_converter : ESM-2 model and companion objects.
    device : torch.device used for inference.
    window : unused here; kept for signature symmetry with the local extractor.
    batch_size : proteins per forward pass.
    max_length : sequences are truncated to this many residues
        (presumably to fit the ESM-2 token limit with special tokens — verify).

    Returns
    -------
    dict mapping accession -> np.ndarray of shape (embed_dim,).
    Proteins whose batch raises are skipped after printing a warning.
    """
    embeddings = {}

    # Drop empty sequences and truncate the rest.
    data = [(acc, seq[:max_length]) for acc, seq in sequences_dict.items() if seq]

    print(f" Extraction pour {len(data)} protéines...")

    for i in tqdm(range(0, len(data), batch_size), desc="ESM-2"):
        batch_data = data[i:i+batch_size]

        try:
            batch_labels, batch_strs, batch_tokens = batch_converter(batch_data)
            batch_tokens = batch_tokens.to(device)

            with torch.no_grad():
                results = model(batch_tokens, repr_layers=[33], return_contacts=False)

            # Final-layer (33) per-token representations.
            representations = results["representations"][33]

            for j, (acc, seq) in enumerate(batch_data):
                seq_len = len(seq)

                # Rows 1..seq_len skip the leading special token added by the
                # batch converter and align with the residues; mean-pool them.
                seq_repr = representations[j, 1:seq_len+1, :]
                global_emb = seq_repr.mean(dim=0).cpu().numpy()

                embeddings[acc] = global_emb

            # Free GPU memory between batches.
            del batch_tokens, results, representations
            if device.type == 'cuda':
                torch.cuda.empty_cache()

        except Exception as e:
            print(f" ⚠️ Erreur batch {i}: {e}")
            continue

    return embeddings
101
+
102
# Union of proteins referenced by either feature table.
proteins_full = set(df_full['uniprot_acc'].unique())
proteins_strict = set(df_strict['uniprot_acc'].unique())
proteins_all = proteins_full | proteins_strict


# Keep only proteins for which a sequence is actually available.
seq_to_process = {acc: seq_dict[acc] for acc in proteins_all if acc in seq_dict}

embeddings_global = extract_esm_embeddings(
    seq_to_process,
    model,
    alphabet,
    batch_converter,
    device,
    batch_size=2 if device.type == 'cuda' else 1,  # reduce if GPU memory is limited
    max_length=1022
)
118
+
119
+
120
def extract_local_embeddings(df, seq_dict, model, alphabet, batch_converter,
                             device, window=25, batch_size=8, max_per_batch=100):
    """Compute one ESM-2 embedding per mutation, centred on the mutated site.

    For each row of ``df`` a ±``window``-residue slice around
    ``row['position']`` is embedded and the representation at the mutated
    position is kept (falling back to the mean over the slice if the
    position is outside it). Rows with a missing sequence or out-of-range
    position get a zero vector, so the output stays aligned with ``df``.

    Parameters
    ----------
    df : DataFrame with 'uniprot_acc' and 'position' columns.
        NOTE(review): 'position' is treated as 0-based (``pos >= len(seq)``
        rejected) — confirm against how the dataset encodes positions.
    seq_dict : accession -> sequence mapping.
    model, alphabet, batch_converter, device : ESM-2 inference objects.
    window : residues kept on each side of the mutated position.
    batch_size : windows per forward pass.
    max_per_batch : unused; kept for backward compatibility.

    Returns
    -------
    list of {'idx': df index label, 'embedding': np.ndarray (embed_dim,)}.
    """
    local_embeddings = []

    def _fill_zeros(info):
        # A failed batch degrades to zero vectors instead of losing rows.
        for failed_idx, _, _ in info:
            local_embeddings.append({
                'idx': failed_idx,
                'embedding': np.zeros(embedding_dim)
            })

    def _run_batch(pending_data, pending_info):
        # One forward pass for the accumulated windows. Shared by the main
        # loop and the final flush (this logic was previously duplicated,
        # and the flush copy used a bare `except:`).
        try:
            _, _, batch_tokens = batch_converter(pending_data)
            batch_tokens = batch_tokens.to(device)

            with torch.no_grad():
                results = model(batch_tokens, repr_layers=[33], return_contacts=False)

            representations = results["representations"][33]

            for j, (row_idx, rel_pos, seq_len) in enumerate(pending_info):
                if rel_pos < seq_len:
                    # +1 skips the leading special token added by the converter.
                    mut_emb = representations[j, rel_pos + 1, :].cpu().numpy()
                else:
                    mut_emb = representations[j, 1:seq_len+1, :].mean(dim=0).cpu().numpy()

                local_embeddings.append({'idx': row_idx, 'embedding': mut_emb})

            del batch_tokens, results, representations
            if device.type == 'cuda':
                torch.cuda.empty_cache()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; the rows are padded with zeros instead.
            _fill_zeros(pending_info)

    # Build the per-mutation sequence windows.
    data = []
    for idx, row in df.iterrows():
        acc = row['uniprot_acc']
        pos = row['position']
        seq = seq_dict.get(acc, '')

        if not seq or pos >= len(seq):
            data.append((idx, None))
            continue

        start = max(0, pos - window)
        end = min(len(seq), pos + window + 1)
        data.append((idx, seq[start:end], pos - start))

    batch_data = []
    batch_info = []

    for item in tqdm(data, desc="Local embeddings"):
        if item[1] is None:
            # Missing sequence / bad position: aligned zero vector.
            local_embeddings.append({
                'idx': item[0],
                'embedding': np.zeros(embedding_dim)
            })
            continue

        idx, local_seq, rel_pos = item
        batch_data.append((f"mut_{idx}", local_seq))
        batch_info.append((idx, rel_pos, len(local_seq)))

        if len(batch_data) >= batch_size:
            _run_batch(batch_data, batch_info)
            batch_data = []
            batch_info = []

    # Flush any remaining partial batch.
    if batch_data:
        _run_batch(batch_data, batch_info)

    return local_embeddings
221
+
222
+ local_emb_full = extract_local_embeddings(
223
+ df_full, seq_dict, model, alphabet, batch_converter, device,
224
+ window=25, batch_size=8
225
+ )
226
+
227
+ local_emb_strict = extract_local_embeddings(
228
+ df_strict, seq_dict, model, alphabet, batch_converter, device,
229
+ window=25, batch_size=8
230
+ )
231
+
232
def create_embedding_matrix(df, embeddings_global, local_embeddings, embedding_dim):
    """Assemble per-mutation embedding matrices aligned with the rows of ``df``.

    Parameters
    ----------
    df : DataFrame with a 'uniprot_acc' column; its index labels are the
        keys used by ``local_embeddings``.
    embeddings_global : dict accession -> (embedding_dim,) array.
    local_embeddings : list of {'idx': index label, 'embedding': array}.
    embedding_dim : width of each embedding vector.

    Returns
    -------
    (X_global, X_local, X_combined) where the first two are
    (len(df), embedding_dim) and X_combined is their horizontal
    concatenation. Rows with no matching embedding stay zero.
    """
    n_samples = len(df)

    X_global = np.zeros((n_samples, embedding_dim))
    X_local = np.zeros((n_samples, embedding_dim))

    local_dict = {item['idx']: item['embedding'] for item in local_embeddings}

    # Single positional pass. The original called df.index.get_loc(i) for
    # every row (an O(n) lookup each -> O(n^2) overall, and it breaks on
    # duplicate index labels) and iterated the frame twice.
    for pos, (label, row) in enumerate(df.iterrows()):
        acc = row['uniprot_acc']
        if acc in embeddings_global:
            X_global[pos] = embeddings_global[acc]
        if label in local_dict:
            X_local[pos] = local_dict[label]

    X_combined = np.concatenate([X_global, X_local], axis=1)

    return X_global, X_local, X_combined
255
+
256
# Build the aligned matrices for both dataset variants.
X_global_full, X_local_full, X_combined_full = create_embedding_matrix(
    df_full, embeddings_global, local_emb_full, embedding_dim
)

X_global_strict, X_local_strict, X_combined_strict = create_embedding_matrix(
    df_strict, embeddings_global, local_emb_strict, embedding_dim
)

print(f" Global: {X_global_full.shape}")
print(f" Local: {X_local_full.shape}")
print(f" Combiné: {X_combined_full.shape}")

# Persist the matrices; downstream scripts load these .npy files.
np.save(PATHS['embeddings'] / 'embeddings_global_full.npy', X_global_full)
np.save(PATHS['embeddings'] / 'embeddings_local_full.npy', X_local_full)
np.save(PATHS['embeddings'] / 'embeddings_combined_full.npy', X_combined_full)

np.save(PATHS['embeddings'] / 'embeddings_global_strict.npy', X_global_strict)
np.save(PATHS['embeddings'] / 'embeddings_local_strict.npy', X_local_strict)
np.save(PATHS['embeddings'] / 'embeddings_combined_strict.npy', X_combined_strict)

# Keep the raw per-protein dict as well, for lookups by accession.
import pickle
with open(PATHS['embeddings'] / 'embeddings_by_protein.pkl', 'wb') as f:
    pickle.dump(embeddings_global, f)
scripts/final_mlp_embedding_model.py.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
"""Untitled17.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""

import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score, average_precision_score, roc_curve, precision_recall_curve
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from tqdm import tqdm
import pickle
import torch
import torch.nn as nn
import warnings
warnings.filterwarnings('ignore')


# NOTE(review): BASE_PATH (and `device`, used further below) are not
# defined in this script — they come from earlier notebook cells. Define
# them before running standalone.
PATHS = {
    'features': BASE_PATH / 'features',
    'embeddings': BASE_PATH / 'embeddings',
    'models': BASE_PATH / 'models',
    'results': BASE_PATH / 'results',
    'figures': BASE_PATH / 'results' / 'figures',
}

df_features = pd.read_parquet(PATHS['features'] / 'features_classical_full.parquet')

# Identifier/label columns excluded from the feature matrix.
id_cols = ['mutation_idx', 'uniprot_acc', 'gene_symbol', 'position', 'wt_aa', 'mut_aa', 'label']
feature_cols = [c for c in df_features.columns if c not in id_cols]

X_features = df_features[feature_cols].values.astype(np.float32)
X_features = np.nan_to_num(X_features, nan=0.0, posinf=0.0, neginf=0.0)
y = df_features['label'].values
proteins = df_features['uniprot_acc'].values

# ESM-2 embedding matrices produced by the embedding script.
X_emb_combined = np.load(PATHS['embeddings'] / 'embeddings_combined_full.npy').astype(np.float32)
X_emb_local = np.load(PATHS['embeddings'] / 'embeddings_local_full.npy').astype(np.float32)

print(f" {X_features.shape}")
print(f" {X_emb_combined.shape}")



from sklearn.decomposition import PCA

# NOTE(review): PCA is fit on the full dataset before cross-validation,
# which leaks test-fold statistics; the hierarchical-validation script in
# this commit fits PCA per fold instead.
pca_combined = PCA(n_components=128, random_state=42)
X_emb_pca = pca_combined.fit_transform(X_emb_combined).astype(np.float32)

pca_local = PCA(n_components=64, random_state=42)
X_emb_local_pca = pca_local.fit_transform(X_emb_local).astype(np.float32)



# Feature-set configurations compared against each other below.
configs = [
    {'name': 'Features classiques', 'X': X_features},
    {'name': 'Embeddings ESM-2', 'X': X_emb_pca},
    {'name': 'Features + Embeddings', 'X': np.concatenate([X_features, X_emb_pca], axis=1)},
    {'name': 'Features + Emb. Local', 'X': np.concatenate([X_features, X_emb_local_pca], axis=1)},
]
67
+
68
class SimpleMLP(nn.Module):
    """Two-hidden-layer MLP with sigmoid output for binary classification.

    Architecture: input -> hidden (ReLU, dropout 0.3) -> hidden//2
    (ReLU, dropout 0.2) -> 1 -> sigmoid.
    """

    def __init__(self, input_dim, hidden_dim=256):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(hidden_dim // 2, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        # squeeze(-1), not squeeze(): a batch of size 1 must yield shape
        # (1,) rather than a 0-dim scalar, which breaks shape-sensitive
        # consumers such as BCELoss against a (1,) target slice.
        return self.net(x).squeeze(-1)
84
+
85
def train_mlp(X_train, y_train, X_test, input_dim, device, epochs=50, lr=0.001):
    """Train a fresh SimpleMLP on one fold and return test probabilities.

    Parameters
    ----------
    X_train, y_train : training fold (numpy arrays).
    X_test : test fold features.
    input_dim : number of input features (must equal X_train.shape[1]).
    device : torch device for training and inference.
    epochs : full passes over the shuffled training data.
    lr : Adam learning rate (weight decay fixed at 1e-4).

    Returns
    -------
    1-D numpy array of predicted probabilities for the rows of X_test.
    """
    model = SimpleMLP(input_dim).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-4)
    criterion = nn.BCELoss()

    X_train_t = torch.FloatTensor(X_train).to(device)
    y_train_t = torch.FloatTensor(y_train).to(device)
    X_test_t = torch.FloatTensor(X_test).to(device)

    model.train()
    batch_size = 512

    for epoch in range(epochs):
        # Fresh shuffle each epoch for SGD-style minibatching.
        perm = torch.randperm(len(X_train_t))

        for i in range(0, len(X_train_t), batch_size):
            idx = perm[i:i+batch_size]

            optimizer.zero_grad()
            # view(-1) guards against a 0-dim output when the last
            # minibatch holds a single sample (BCELoss would otherwise see
            # a shape mismatch against the (1,) target slice).
            outputs = model(X_train_t[idx]).view(-1)
            loss = criterion(outputs, y_train_t[idx])
            loss.backward()
            optimizer.step()

    model.eval()
    with torch.no_grad():
        # reshape(-1) keeps the return iterable even for a 1-row test fold.
        y_pred = model(X_test_t).cpu().numpy().reshape(-1)

    return y_pred
115
+
116
def evaluate_lpocv_gpu(X, y, proteins, device, epochs=30):
    """Leave-protein-out cross-validation of the MLP.

    Each unique protein (with >= 2 mutations) is held out in turn; the MLP
    is trained on the rest and scored on the held-out mutations. Metrics
    are pooled over all held-out predictions.

    Returns
    -------
    (auc_roc, auc_pr, df_res) where df_res has 'y_true'/'y_pred' columns;
    metrics are (0, 0) when the pooled AUC is undefined.
    """
    unique_proteins = np.unique(proteins)
    results = []

    for protein in tqdm(unique_proteins, desc="LPOCV GPU"):
        test_mask = proteins == protein
        train_mask = ~test_mask

        if test_mask.sum() < 2:
            continue

        # Fit the scaler on the training fold only. The original fit it on
        # the full matrix before splitting, leaking test-fold statistics
        # into training (the hierarchical-validation script fixes this too).
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X[train_mask])
        X_test = scaler.transform(X[test_mask])
        y_train, y_test = y[train_mask], y[test_mask]

        y_pred = train_mlp(X_train, y_train, X_test, X.shape[1], device, epochs=epochs)

        for pred, true in zip(y_pred, y_test):
            results.append({'y_true': true, 'y_pred': float(pred)})

    df_res = pd.DataFrame(results)

    if len(df_res) > 0 and len(df_res['y_true'].unique()) > 1:
        auc_roc = roc_auc_score(df_res['y_true'], df_res['y_pred'])
        auc_pr = average_precision_score(df_res['y_true'], df_res['y_pred'])
    else:
        auc_roc, auc_pr = 0, 0

    return auc_roc, auc_pr, df_res
148
+
149
# Run the MLP LPOCV for every feature configuration and collect metrics.
# NOTE(review): `device` is never defined in this script (it is defined in
# the embedding script) — running this file alone raises NameError.
results_all = {}

for cfg in configs:
    print(f"\n 📊 {cfg['name']}...")

    auc_roc, auc_pr, df_res = evaluate_lpocv_gpu(
        cfg['X'], y, proteins, device, epochs=30
    )

    results_all[cfg['name']] = {
        'auc_roc': auc_roc,
        'auc_pr': auc_pr,
        'predictions': df_res,
        'n_features': cfg['X'].shape[1],
    }

    print(f" AUC-ROC: {auc_roc:.4f}")
    print(f" AUC-PR: {auc_pr:.4f}")
167
+
168
+
169
def evaluate_lpocv_logreg(X, y, proteins):
    """Leave-protein-out cross-validation with a logistic-regression baseline.

    Same protocol as evaluate_lpocv_gpu but with a (much faster) linear
    model; used to sanity-check the MLP results.

    Returns
    -------
    (auc_roc, auc_pr) pooled over all held-out predictions, or (0, 0)
    when the AUC is undefined.
    """
    unique_proteins = np.unique(proteins)
    results = []

    for protein in tqdm(unique_proteins, desc="LPOCV LogReg", leave=False):
        test_mask = proteins == protein
        train_mask = ~test_mask

        if test_mask.sum() < 2:
            continue

        # Fit the scaler on the training fold only; the original fit it on
        # the full matrix before splitting, leaking test statistics.
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X[train_mask])
        X_test = scaler.transform(X[test_mask])
        y_train, y_test = y[train_mask], y[test_mask]

        model = LogisticRegression(max_iter=500, C=0.1, random_state=42)
        model.fit(X_train, y_train)
        y_pred = model.predict_proba(X_test)[:, 1]

        for pred, true in zip(y_pred, y_test):
            results.append({'y_true': true, 'y_pred': pred})

    df_res = pd.DataFrame(results)

    # Also require both classes to be present (the original only checked
    # non-emptiness; roc_auc_score raises on a single-class vector). This
    # matches evaluate_lpocv_gpu.
    if len(df_res) > 0 and len(df_res['y_true'].unique()) > 1:
        auc_roc = roc_auc_score(df_res['y_true'], df_res['y_pred'])
        auc_pr = average_precision_score(df_res['y_true'], df_res['y_pred'])
    else:
        auc_roc, auc_pr = 0, 0

    return auc_roc, auc_pr
203
+
204
# Logistic-regression baseline for each configuration, merged into results_all.
for cfg in configs:
    auc_roc, auc_pr = evaluate_lpocv_logreg(cfg['X'], y, proteins)
    results_all[cfg['name']]['auc_roc_logreg'] = auc_roc
    results_all[cfg['name']]['auc_pr_logreg'] = auc_pr
    print(f" {cfg['name']}: LogReg AUC-ROC = {auc_roc:.4f}")


# Build and save a comparison table sorted by MLP AUC-ROC.
comparison_data = []
for name, res in results_all.items():
    comparison_data.append({
        'Configuration': name,
        'Features': res['n_features'],
        'AUC-ROC (MLP)': res['auc_roc'],
        'AUC-PR (MLP)': res['auc_pr'],
        'AUC-ROC (LogReg)': res.get('auc_roc_logreg', 0),
    })

df_comparison = pd.DataFrame(comparison_data)
df_comparison = df_comparison.sort_values('AUC-ROC (MLP)', ascending=False)

print("\n" + df_comparison.to_string(index=False))

best_name = df_comparison.iloc[0]['Configuration']
best_auc = df_comparison.iloc[0]['AUC-ROC (MLP)']
print(f"\n 🏆 Meilleur: {best_name} (AUC-ROC = {best_auc:.4f})")

df_comparison.to_csv(PATHS['results'] / 'comparison_final.csv', index=False)
scripts/hierarchical_validation_no_leakage.py.py ADDED
@@ -0,0 +1,426 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
"""Untitled17.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""

import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import torch.nn as nn
import pickle
import warnings
warnings.filterwarnings('ignore')

# NOTE(review): BASE_PATH is not defined in this script — presumably set in
# an earlier notebook cell. Define it before running standalone.
PATHS = {
    'features': BASE_PATH / 'features',
    'embeddings': BASE_PATH / 'embeddings',
    'results': BASE_PATH / 'results',
    'figures': BASE_PATH / 'results' / 'figures',
}

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f" Device: {device}")


df = pd.read_parquet(PATHS['features'] / 'features_classical_full.parquet')

# Identifier/label columns excluded from the feature matrix.
id_cols = ['mutation_idx', 'uniprot_acc', 'gene_symbol', 'position', 'wt_aa', 'mut_aa', 'label']
feature_cols = [c for c in df.columns if c not in id_cols]

X_features = df[feature_cols].values.astype(np.float32)
X_features = np.nan_to_num(X_features, nan=0.0, posinf=0.0, neginf=0.0)

# Raw (un-reduced) embeddings: PCA is applied per fold later to avoid leakage.
X_emb_raw = np.load(PATHS['embeddings'] / 'embeddings_combined_full.npy').astype(np.float32)

y = df['label'].values
proteins = df['uniprot_acc'].values
48
+
49
+
50
+
51
+
52
# Functional axes: gene lists assigning each mutation to a mitochondrial
# biology axis (used below for leave-axis-out validation). Genes not in
# any list keep the 'OTHER' label.
AXES = {
    'OXPHOS_CI': ['NDUFAF1', 'NDUFAF2', 'NDUFAF3', 'NDUFAF4', 'NDUFAF5', 'NDUFAF6',
                  'NDUFS1', 'NDUFS2', 'NDUFS3', 'NDUFS4', 'NDUFS6', 'NDUFS7', 'NDUFS8',
                  'NDUFV1', 'NDUFV2', 'NDUFA1', 'NDUFA2', 'NDUFA9', 'NDUFA10', 'NDUFA11',
                  'ACAD9', 'TIMMDC1', 'FOXRED1', 'NUBPL'],

    'OXPHOS_CIV': ['SURF1', 'SCO1', 'SCO2', 'COX10', 'COX14', 'COX15', 'COX20',
                   'COA5', 'COA6', 'COA7', 'COX4I1', 'COX6A1', 'COX6B1', 'COX7B',
                   'COX8A', 'PET100', 'PET117'],

    'DYNAMICS': ['OPA1', 'MFN1', 'MFN2', 'DNM1L', 'FIS1', 'AFG3L2', 'SPG7',
                 'YME1L1', 'OMA1', 'LONP1'],

    'TRANSLATION': ['AARS2', 'DARS2', 'EARS2', 'FARS2', 'HARS2', 'IARS2', 'LARS2',
                    'MARS2', 'NARS2', 'RARS2', 'SARS2', 'TARS2', 'VARS2', 'YARS2',
                    'GFM1', 'TSFM', 'TUFM', 'C12orf65'],

    'MTDNA': ['POLG', 'POLG2', 'TWNK', 'TFAM', 'RRM2B', 'MPV17', 'DGUOK', 'TK2',
              'SUCLA2', 'SUCLG1', 'FBXL4'],

    'METABOLISM': ['PDHA1', 'PDHB', 'PDHX', 'DLD', 'PC', 'PCCA', 'PCCB', 'MUT',
                   'LIAS', 'LIPT1', 'BOLA3', 'NFU1', 'ISCA1', 'ISCA2', 'GLRX5'],

    'IMPORT': ['TIMM50', 'TIMM8A', 'DNAJC19', 'AGK', 'TOMM20', 'TOMM40',
               'HSPA9', 'HSPD1'],

    'REDOX_IMS': ['CHCHD2', 'CHCHD10', 'CHCHD4', 'AIFM1', 'COX17', 'GFER'],
}

# Label each mutation with its axis (default 'OTHER').
df['axis'] = 'OTHER'
for axis_name, genes in AXES.items():
    mask = df['gene_symbol'].isin(genes)
    df.loc[mask, 'axis'] = axis_name

# Per-axis counts (total and pathogenic) for a quick sanity check.
for axis in df['axis'].value_counts().index:
    n = (df['axis'] == axis).sum()
    n_patho = (df[df['axis'] == axis]['label'] == 1).sum()
    print(f" {axis:<15} n={n:>5} ({n_patho} patho)")
90
+
91
def get_family(gene):
    """Map a gene symbol to a coarse family label.

    A known complex/family prefix wins (first match in order);
    otherwise the first three characters of the symbol are used
    (or the whole symbol if shorter).
    """
    known_prefixes = ('NDUF', 'COX', 'ATP5', 'SDH', 'UQCR', 'TIM', 'TOM',
                      'SLC25', 'MRPL', 'MRPS', 'CHCHD', 'COA')
    matched = next((p for p in known_prefixes if gene.startswith(p)), None)
    if matched is not None:
        return matched
    return gene[:3] if len(gene) >= 3 else gene
98
+
99
# Family label per mutation, plus plain numpy views used by the CV loops.
df['family'] = df['gene_symbol'].apply(get_family)
axes = df['axis'].values
families = df['family'].values
102
+
103
+
104
+
105
def prepare_data_no_leakage(X_features, X_emb_raw, train_mask, test_mask, n_pca=128):
    """Per-fold preprocessing with no train/test leakage.

    StandardScaler (classical features) and PCA + StandardScaler
    (embeddings) are fit on the training fold only and merely applied to
    the test fold; the two scaled views are then concatenated.

    Parameters
    ----------
    X_features : (n, f) classical feature matrix.
    X_emb_raw : (n, d) raw ESM-2 embedding matrix.
    train_mask, test_mask : boolean row masks over the n samples.
    n_pca : requested number of PCA components for the embeddings.

    Returns
    -------
    (X_train, X_test) float32 arrays of shape (*, f + n_components).
    """
    X_feat_train = X_features[train_mask]
    X_feat_test = X_features[test_mask]
    X_emb_train = X_emb_raw[train_mask]
    X_emb_test = X_emb_raw[test_mask]

    scaler_feat = StandardScaler()
    X_feat_train_s = scaler_feat.fit_transform(X_feat_train)
    X_feat_test_s = scaler_feat.transform(X_feat_test)

    # PCA requires n_components <= min(n_samples, n_features); the original
    # capped only by the number of training rows and would raise on an
    # embedding matrix narrower than n_pca (as in the zeroed ablation run).
    n_components = min(n_pca, X_emb_train.shape[0] - 1, X_emb_train.shape[1])
    pca = PCA(n_components=n_components, random_state=42)
    X_emb_train_pca = pca.fit_transform(X_emb_train)
    X_emb_test_pca = pca.transform(X_emb_test)

    scaler_emb = StandardScaler()
    X_emb_train_s = scaler_emb.fit_transform(X_emb_train_pca)
    X_emb_test_s = scaler_emb.transform(X_emb_test_pca)

    X_train = np.concatenate([X_feat_train_s, X_emb_train_s], axis=1)
    X_test = np.concatenate([X_feat_test_s, X_emb_test_s], axis=1)

    return X_train.astype(np.float32), X_test.astype(np.float32)
128
+
129
class SimpleMLP(nn.Module):
    """Two-hidden-layer MLP with sigmoid output for binary classification.

    Architecture: input -> hidden (ReLU, dropout 0.3) -> hidden//2
    (ReLU, dropout 0.2) -> 1 -> sigmoid.
    """

    def __init__(self, input_dim, hidden_dim=256):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(hidden_dim // 2, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        # squeeze(-1), not squeeze(): a batch of size 1 must yield shape
        # (1,) rather than a 0-dim scalar, which breaks shape-sensitive
        # consumers such as BCELoss against a (1,) target.
        return self.net(x).squeeze(-1)
145
+
146
def train_mlp_predict(X_train, y_train, X_test, device, epochs=50):
    """Fit a SimpleMLP on the training fold (full-batch gradient steps)
    and return predicted probabilities for the test fold as numpy."""
    net = SimpleMLP(X_train.shape[1]).to(device)
    opt = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=1e-4)
    bce = nn.BCELoss()

    inputs = torch.FloatTensor(X_train).to(device)
    targets = torch.FloatTensor(y_train).to(device)
    eval_inputs = torch.FloatTensor(X_test).to(device)

    net.train()
    for _ in range(epochs):
        opt.zero_grad()
        fold_loss = bce(net(inputs), targets)
        fold_loss.backward()
        opt.step()

    net.eval()
    with torch.no_grad():
        predictions = net(eval_inputs).cpu().numpy()
    return predictions
166
+
167
def train_logreg_predict(X_train, y_train, X_test):
    """Fit a logistic-regression baseline and return test-fold
    probabilities for the positive class (faster than the MLP)."""
    clf = LogisticRegression(max_iter=500, C=0.1, random_state=42)
    clf.fit(X_train, y_train)
    probs = clf.predict_proba(X_test)
    return probs[:, 1]
172
+
173
+
174
# Leave-protein-out CV with per-fold (leak-free) preprocessing and the MLP.
unique_proteins = np.unique(proteins)
lpocv_results = []

for protein in tqdm(unique_proteins, desc="LPOCV"):
    test_mask = proteins == protein
    train_mask = ~test_mask

    # Need at least 2 held-out mutations for a meaningful fold.
    if test_mask.sum() < 2:
        continue

    y_train, y_test = y[train_mask], y[test_mask]

    X_train, X_test = prepare_data_no_leakage(
        X_features, X_emb_raw, train_mask, test_mask, n_pca=128
    )

    y_pred = train_mlp_predict(X_train, y_train, X_test, device, epochs=30)

    for pred, true in zip(y_pred, y_test):
        lpocv_results.append({'y_true': int(true), 'y_pred': float(pred)})

# Pooled metrics over all held-out predictions.
df_lpocv = pd.DataFrame(lpocv_results)
auc_lpocv = roc_auc_score(df_lpocv['y_true'], df_lpocv['y_pred'])
ap_lpocv = average_precision_score(df_lpocv['y_true'], df_lpocv['y_pred'])
198
+
199
# Leave-axis-out: hold out one functional axis at a time to test whether
# the model generalizes across mitochondrial biology axes.
unique_axes = [a for a in df['axis'].unique() if a != 'OTHER']
lao_results = {}

for axis in tqdm(unique_axes, desc="Leave-Axis-Out"):
    test_mask = axes == axis
    train_mask = axes != axis

    n_test = test_mask.sum()
    if n_test < 20:
        continue

    y_train, y_test = y[train_mask], y[test_mask]

    # AUC is undefined if the held-out axis has a single class.
    if len(np.unique(y_test)) < 2:
        continue

    X_train, X_test = prepare_data_no_leakage(
        X_features, X_emb_raw, train_mask, test_mask, n_pca=128
    )

    # Linear baseline here (faster than the MLP over many folds).
    y_pred = train_logreg_predict(X_train, y_train, X_test)

    auc = roc_auc_score(y_test, y_pred)
    ap = average_precision_score(y_test, y_pred)

    lao_results[axis] = {
        'n': int(n_test),
        'n_patho': int((y_test == 1).sum()),
        'auc_roc': float(auc),
        'auc_pr': float(ap),
    }

    print(f" {axis:<15} n={n_test:>4} AUC={auc:.3f} AP={ap:.3f}")
232
+
233
# Leave-family-out: hold out entire gene families (>= 50 mutations,
# at most 12 families) to probe cross-family generalization.
family_counts = df['family'].value_counts()
large_families = family_counts[family_counts >= 50].index.tolist()

lfo_results = {}

for family in tqdm(large_families[:12], desc="Leave-Family-Out"):
    test_mask = families == family
    train_mask = families != family

    n_test = test_mask.sum()
    y_train, y_test = y[train_mask], y[test_mask]

    # AUC undefined for a single-class held-out family.
    if len(np.unique(y_test)) < 2:
        continue

    X_train, X_test = prepare_data_no_leakage(
        X_features, X_emb_raw, train_mask, test_mask, n_pca=128
    )

    y_pred = train_logreg_predict(X_train, y_train, X_test)

    auc = roc_auc_score(y_test, y_pred)

    lfo_results[family] = {
        'n': int(n_test),
        'auc_roc': float(auc),
    }

    print(f" {family:<10} n={n_test:>4} AUC={auc:.3f}")
262
+
263
+
264
# Mechanistic strata: boolean masks selecting mutation subsets with a
# shared biochemical signature. Each stratum gets its own LPOCV run.
strata = {
    'Cysteine_lost': df['cysteine_lost'] == 1,
    'Cysteine_gained': df['cysteine_gained'] == 1,
    'Charge_modified': (df['charge_introducing'] == 1) | (df['charge_removing'] == 1),
    'Proline_intro': df['proline_introduced'] == 1,
    'N_terminal': df['is_n_terminal'] == 1,
    'Cys_rich_prot': df['protein_cysteine_fraction'] > 0.03,
    'ROS_vulnerable': df['ros_vulnerability_score'] > 2,
}

strata_results = {}

for strata_name, mask in strata.items():
    n = mask.sum()
    # Skip strata too small for a stable AUC estimate.
    if n < 50:
        continue

    # Positional indices of this stratum within the full dataset.
    idx_strata = np.where(mask)[0]
    proteins_strata = proteins[mask]
    unique_prot_strata = np.unique(proteins_strata)

    results = []
    for protein in unique_prot_strata:
        # Leave-protein-out within the stratum.
        test_m = proteins_strata == protein
        train_m = ~test_m

        if test_m.sum() < 2:
            continue

        train_idx = idx_strata[train_m]
        test_idx = idx_strata[test_m]

        # Lift the within-stratum split back to full-dataset boolean masks,
        # as expected by prepare_data_no_leakage.
        global_train_mask = np.zeros(len(y), dtype=bool)
        global_test_mask = np.zeros(len(y), dtype=bool)
        global_train_mask[train_idx] = True
        global_test_mask[test_idx] = True

        y_train_s, y_test_s = y[global_train_mask], y[global_test_mask]

        # Smaller PCA here: strata folds can have few training rows.
        X_train, X_test = prepare_data_no_leakage(
            X_features, X_emb_raw, global_train_mask, global_test_mask, n_pca=64
        )

        y_pred = train_logreg_predict(X_train, y_train_s, X_test)

        for pred, true in zip(y_pred, y_test_s):
            results.append({'y_true': true, 'y_pred': pred})

    # Require enough pooled predictions and both classes before scoring.
    if len(results) > 30:
        df_res = pd.DataFrame(results)
        if len(df_res['y_true'].unique()) > 1:
            auc = roc_auc_score(df_res['y_true'], df_res['y_pred'])
            strata_results[strata_name] = {
                'n': int(n),
                'n_patho': int((df.loc[mask, 'label'] == 1).sum()),
                'auc_roc': float(auc),
            }
            print(f" {strata_name:<20} n={n:>4} AUC={auc:.3f}")
322
+
323
+
324
# Feature groups removed one at a time in the ablation study below.
feature_groups = {
    'Substitution': ['delta_hydrophobicity', 'delta_charge', 'delta_volume',
                     'delta_disorder_propensity', 'abs_delta_hydro', 'abs_delta_charge',
                     'abs_delta_volume', 'delta_aromatic'],
    'Local_context': ['local_hydro_mean', 'local_charge_mean', 'local_disorder_mean',
                      'local_charged_fraction', 'local_aromatic_fraction',
                      'local_proline_fraction', 'local_glycine_fraction',
                      'local_cysteine_fraction', 'local_disorder_promoting',
                      'local_order_promoting', 'local_sequence_entropy'],
    'Position': ['position_absolute', 'position_normalized', 'is_n_terminal',
                 'is_c_terminal', 'distance_to_n_term', 'distance_to_c_term'],
    'Cysteine_ROS': ['cysteine_gained', 'cysteine_lost', 'cysteine_change',
                     'nearby_cysteine_count', 'cysteine_in_cys_rich_region',
                     'ros_vulnerability_score'],
    'Protein_global': ['protein_length', 'protein_cysteine_count',
                       'protein_cysteine_fraction', 'protein_charged_fraction',
                       'protein_disorder_mean'],
    'Composite': ['idp_disruption_score', 'import_disruption_score'],
}

# Fixed random subsample of proteins so every ablation run uses the same
# (cheaper) LPOCV folds.
np.random.seed(42)
sample_proteins = np.random.choice(unique_proteins, size=min(80, len(unique_proteins)), replace=False)
346
+
347
def ablation_lpocv(X_feat_ablated, X_emb_raw, y, proteins, sample_proteins, n_pca=64):
    """Leave-protein-out AUC-ROC over a protein subsample, used to score
    one ablated feature set. Returns 0 when the AUC is undefined (no
    predictions, or a single class among the pooled test labels)."""
    y_true_all = []
    y_pred_all = []

    for held_out in sample_proteins:
        held_mask = proteins == held_out
        if held_mask.sum() < 2:
            continue
        keep_mask = ~held_mask

        X_tr, X_te = prepare_data_no_leakage(
            X_feat_ablated, X_emb_raw, keep_mask, held_mask, n_pca=n_pca
        )
        fold_pred = train_logreg_predict(X_tr, y[keep_mask], X_te)

        y_true_all.extend(y[held_mask])
        y_pred_all.extend(fold_pred)

    if y_true_all and len(set(y_true_all)) > 1:
        return roc_auc_score(y_true_all, y_pred_all)
    return 0
372
+
373
# Baseline AUC with all features + embeddings on the sampled folds.
auc_baseline = ablation_lpocv(X_features, X_emb_raw, y, proteins, sample_proteins)
print(f" AUC baseline: {auc_baseline:.4f}")

# "No embeddings" run: a zeroed 10-column stand-in (with n_pca=8) instead
# of the real embedding matrix.
auc_no_emb = ablation_lpocv(X_features, np.zeros_like(X_emb_raw[:, :10]), y, proteins, sample_proteins, n_pca=8)
print(f" AUC sans embeddings: {auc_no_emb:.4f} Δ={auc_no_emb - auc_baseline:+.4f}")

feature_to_idx = {f: i for i, f in enumerate(feature_cols)}
ablation_results = {'Embeddings_ESM2': {'auc': auc_no_emb, 'delta': auc_no_emb - auc_baseline}}

# Drop each feature group in turn and re-score; delta < 0 means the group
# contributed positively.
for group_name, group_features in feature_groups.items():
    remove_idx = [feature_to_idx[f] for f in group_features if f in feature_to_idx]
    keep_idx = [i for i in range(len(feature_cols)) if i not in remove_idx]

    X_ablated = X_features[:, keep_idx]

    auc = ablation_lpocv(X_ablated, X_emb_raw, y, proteins, sample_proteins)
    delta = auc - auc_baseline

    ablation_results[group_name] = {
        'auc': float(auc),
        'delta': float(delta),
        'n_removed': len(remove_idx),
    }

    print(f" Sans {group_name:<15} AUC={auc:.4f} Δ={delta:+.4f} (-{len(remove_idx)} feat)")


# Aggregate statistics; most_important = the removal causing the largest drop.
lao_mean = np.mean([r['auc_roc'] for r in lao_results.values()]) if lao_results else 0
lao_std = np.std([r['auc_roc'] for r in lao_results.values()]) if lao_results else 0
lfo_mean = np.mean([r['auc_roc'] for r in lfo_results.values()]) if lfo_results else 0
most_important = min(ablation_results, key=lambda x: ablation_results[x]['delta']) if ablation_results else 'N/A'


# Final summary, persisted for downstream reporting.
results_summary = {
    'lpocv': {
        'auc_roc': float(auc_lpocv),
        'auc_pr': float(ap_lpocv),
        'n_predictions': len(df_lpocv),
    },
    'leave_axis_out': lao_results,
    'leave_family_out': lfo_results,
    'stratification': strata_results,
    'ablation': ablation_results,
    'methodology': {
        'scaler': 'StandardScaler fit on train only',
        'pca': 'PCA fit on train only (128 components)',
        'axes_source': 'OMIM, ClinVar, MitoCarta 3.0, KEGG',
    }
}

with open(PATHS['results'] / 'hierarchical_validation_final.pkl', 'wb') as f:
    pickle.dump(results_summary, f)

df_lpocv.to_parquet(PATHS['results'] / 'lpocv_predictions_final.parquet')
scripts/lpocv_validation.py.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Untitled17.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
8
+ """
9
+
10
+ import pandas as pd
11
+ import numpy as np
12
+ from sklearn.ensemble import GradientBoostingClassifier
13
+ from sklearn.metrics import roc_auc_score, average_precision_score, precision_recall_curve, f1_score
14
+ from sklearn.preprocessing import StandardScaler
15
+ from tqdm.auto import tqdm
16
+ import warnings
17
+ warnings.filterwarnings('ignore')
18
+
19
+ ####Leave-Protein-Out Cross-Validation (LPOCV) of the mechanistic pathogenicity model.
20
+
21
+
22
# ---- Load precomputed per-mutation features -------------------------------
# NOTE(review): PATHS / BASE_PATH are not defined in this script; they are
# presumably injected by the Colab notebook this file was exported from —
# confirm before running standalone.
df_features = pd.read_parquet(PATHS['data_processed'] / 'mutation_features.parquet')

# Interaction / derived features.
df_features['disorder_x_charge'] = df_features['delta_disorder_propensity'] * df_features['delta_charge']
df_features['disorder_x_hydro'] = df_features['delta_disorder_propensity'] * df_features['delta_hydrophobicity']
df_features['ros_x_cysteine'] = df_features['ros_sensitivity'] * df_features['local_cysteine_density']
df_features['propagation_x_disorder'] = df_features['propagation_extent'] * abs(df_features['predicted_delta_disorder_mean'])
# +0.01 guards against division by zero when the predicted std is 0.
df_features['disorder_confidence_ratio'] = df_features['predicted_delta_disorder_mean'] / (df_features['predicted_delta_disorder_std'] + 0.01)
df_features['abs_delta_disorder'] = abs(df_features['delta_disorder_propensity'])
df_features['abs_delta_charge'] = abs(df_features['delta_charge'])
df_features['abs_delta_hydro'] = abs(df_features['delta_hydrophobicity'])
# Binary indicator features.
df_features['is_n_terminal'] = (df_features['position'] < 50).astype(int)
# NOTE(review): is_c_terminal is hard-coded to 0 (protein length is not
# available in this frame), so the column is constant and uninformative —
# confirm whether this placeholder is intentional.
df_features['is_c_terminal'] = 0
df_features['is_charge_changing'] = (df_features['delta_charge'] != 0).astype(int)
df_features['is_disorder_increasing'] = (df_features['delta_disorder_propensity'] > 0).astype(int)
df_features['is_high_ros'] = (df_features['ros_sensitivity'] > 0.5).astype(int)
# One-hot encoding of the sub-mitochondrial region annotation.
df_features['region_matrix'] = (df_features['region_type'] == 'matrix_idr').astype(int)
df_features['region_ims'] = (df_features['region_type'] == 'ims_idr').astype(int)
df_features['region_presequence'] = (df_features['region_type'] == 'presequence').astype(int)
df_features['region_membrane'] = (df_features['region_type'] == 'membrane_adjacent').astype(int)

# Remember which rows actually carried a disorder annotation before imputing.
df_features['has_disorder_annotation'] = df_features['in_disorder_region'].notna()
df_features['in_disorder_region'] = df_features['in_disorder_region'].fillna(False)

print(f" ✓ {len(df_features)} mutations")
print(f" ✓ {df_features['uniprot_acc'].nunique()} protéines uniques")

# Feature set used by the mechanistic model (model A) below.
features_mechanistic = [
    'delta_hydrophobicity', 'delta_charge', 'delta_volume',
    'delta_disorder_propensity', 'delta_aromatic',
    'local_charge_density', 'local_disorder_mean', 'local_disorder_variance',
    'local_hydrophobicity', 'local_aromatic_density',
    'local_proline_density', 'local_glycine_density', 'local_cysteine_density',
    'predicted_delta_disorder_mean', 'predicted_delta_disorder_std',
    'propagation_extent', 'max_effective_delta',
    'delta_cpr', 'delta_ncpr', 'delta_kappa',
    'ros_sensitivity', 'import_efficiency_change',
    'cysteine_gained', 'cysteine_lost', 'disulfide_disruption_risk',
    'oxidation_sensitivity_change',
    'region_matrix', 'region_ims', 'region_presequence', 'region_membrane',
    'disorder_x_charge', 'disorder_x_hydro', 'ros_x_cysteine',
    'propagation_x_disorder', 'disorder_confidence_ratio',
    'abs_delta_disorder', 'abs_delta_charge', 'abs_delta_hydro',
    'is_n_terminal', 'is_charge_changing', 'is_disorder_increasing', 'is_high_ros'
]
68
+
69
+
70
def leave_protein_out_cv(df, feature_cols, model_params, threshold=None):
    """Leave-Protein-Out cross-validation of a GradientBoosting classifier.

    Each unique protein (``uniprot_acc``) is held out in turn; the model is
    trained on all remaining proteins, so no mutation of the test protein
    leaks into training.  Scaling is fit on the training fold only.

    Parameters
    ----------
    df : DataFrame with columns 'uniprot_acc', 'label' and ``feature_cols``.
    feature_cols : list of feature column names.
    model_params : kwargs forwarded to ``GradientBoostingClassifier``.
    threshold : optional fixed decision threshold. If None, the F1-optimal
        threshold is derived from the pooled out-of-fold predictions.

    Returns
    -------
    dict with pooled AUC-ROC / AUC-PR, the decision threshold, recall and
    precision at that threshold, the pooled predictions, and a per-protein
    results DataFrame.
    """
    proteins = df['uniprot_acc'].unique()

    all_y_true = []
    all_y_prob = []
    protein_results = []

    for protein in tqdm(proteins, desc="LPOCV"):
        train_mask = df['uniprot_acc'] != protein
        test_mask = df['uniprot_acc'] == protein

        df_train = df[train_mask]
        df_test = df[test_mask]

        # Skip folds too small to evaluate, or with too few positive
        # training examples to fit a meaningful model.
        if len(df_test) < 2 or df_train['label'].sum() < 5:
            continue

        X_train = df_train[feature_cols].fillna(0).values
        y_train = df_train['label'].values
        X_test = df_test[feature_cols].fillna(0).values
        y_test = df_test['label'].values

        # Fit the scaler on the training fold only (no leakage).
        scaler = StandardScaler()
        X_train_scaled = scaler.fit_transform(X_train)
        X_test_scaled = scaler.transform(X_test)

        model = GradientBoostingClassifier(**model_params)
        model.fit(X_train_scaled, y_train)

        y_prob = model.predict_proba(X_test_scaled)[:, 1]

        all_y_true.extend(y_test)
        all_y_prob.extend(y_prob)

        if y_test.sum() > 0:
            try:
                protein_auc = roc_auc_score(y_test, y_prob)
            except ValueError:
                # Single-class test fold: per-protein AUC is undefined.
                # (Was a bare `except:`; narrowed to the error AUC raises.)
                protein_auc = np.nan
            protein_results.append({
                'protein': protein,
                'n_mutations': len(y_test),
                'n_pathogenic': y_test.sum(),
                'auc': protein_auc
            })

    all_y_true = np.array(all_y_true)
    all_y_prob = np.array(all_y_prob)

    auc_roc = roc_auc_score(all_y_true, all_y_prob)
    auc_pr = average_precision_score(all_y_true, all_y_prob)

    if threshold is None:
        # Choose the threshold maximising F1 on the pooled predictions.
        precisions, recalls, thresholds = precision_recall_curve(all_y_true, all_y_prob)
        f1_scores = 2 * (precisions * recalls) / (precisions + recalls + 1e-10)
        optimal_idx = np.argmax(f1_scores)
        # precision_recall_curve returns one more (p, r) pair than thresholds.
        threshold = thresholds[optimal_idx] if optimal_idx < len(thresholds) else 0.5

    all_y_pred = (all_y_prob >= threshold).astype(int)

    tp = ((all_y_pred == 1) & (all_y_true == 1)).sum()
    fp = ((all_y_pred == 1) & (all_y_true == 0)).sum()
    fn = ((all_y_pred == 0) & (all_y_true == 1)).sum()

    recall = tp / (tp + fn) if (tp + fn) > 0 else 0
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0

    return {
        'auc_roc': auc_roc,
        'auc_pr': auc_pr,
        'threshold': threshold,
        'recall': recall,
        'precision': precision,
        'y_true': all_y_true,
        'y_prob': all_y_prob,
        'protein_results': pd.DataFrame(protein_results)
    }
149
+
150
# Hyper-parameters for the mechanistic GradientBoosting model (model A).
model_params = {
    'n_estimators': 200,
    'max_depth': 5,
    'learning_rate': 0.05,
    'min_samples_split': 10,
    'min_samples_leaf': 5,
    'subsample': 0.8,
    'random_state': 42
}

results_A = leave_protein_out_cv(df_features, features_mechanistic, model_params)

print(f" AUC-ROC: {results_A['auc_roc']:.4f}")
print(f" AUC-PR: {results_A['auc_pr']:.4f}")
print(f" Seuil: {results_A['threshold']:.3f}")
print(f" Recall: {results_A['recall']:.2%}")
print(f" Precision: {results_A['precision']:.2%}")

# Per-protein breakdown: proteins with the most pathogenic mutations first.
df_protein_results = results_A['protein_results'].dropna()
df_protein_results = df_protein_results.sort_values('n_pathogenic', ascending=False)

for _, row in df_protein_results.head(10).iterrows():
    auc_str = f"{row['auc']:.2f}" if not np.isnan(row['auc']) else "N/A"
    print(f" {row['protein']}: {row['n_mutations']} mut ({row['n_pathogenic']} patho) - AUC: {auc_str}")

# Cast metrics to plain Python floats: roc_auc_score & co. return numpy
# scalars, which json.dump cannot serialize (TypeError under recent numpy).
validation_results = {
    'model_A_mechanistic': {
        'features': features_mechanistic,
        'auc_roc': float(results_A['auc_roc']),
        'auc_pr': float(results_A['auc_pr']),
        'threshold': float(results_A['threshold']),
        'recall': float(results_A['recall']),
        'precision': float(results_A['precision'])
    },
}

import json
results_path = PATHS['evaluations'] / 'lpocv_results.json'
with open(results_path, 'w') as f:
    json.dump(validation_results, f, indent=2)
scripts/model_comparison_features_vs_esm2.py.py ADDED
@@ -0,0 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Untitled17.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
8
+ """
9
+
10
+ import pandas as pd
11
+ import numpy as np
12
+ from pathlib import Path
13
+ from sklearn.ensemble import GradientBoostingClassifier
14
+ from sklearn.preprocessing import StandardScaler
15
+ from sklearn.decomposition import PCA
16
+ from sklearn.metrics import roc_auc_score, average_precision_score, roc_curve, precision_recall_curve
17
+ import matplotlib.pyplot as plt
18
+ from tqdm import tqdm
19
+ import pickle
20
+ import warnings
21
+ warnings.filterwarnings('ignore')
22
+
23
+ PATHS = {
24
+ 'features': BASE_PATH / 'features',
25
+ 'embeddings': BASE_PATH / 'embeddings',
26
+ 'models': BASE_PATH / 'models',
27
+ 'results': BASE_PATH / 'results',
28
+ 'figures': BASE_PATH / 'results' / 'figures',
29
+ }
30
+
31
+ PATHS['figures'].mkdir(parents=True, exist_ok=True)
32
+
33
+
34
+
35
+ df_features = pd.read_parquet(PATHS['features'] / 'features_classical_full.parquet')
36
+
37
+ id_cols = ['mutation_idx', 'uniprot_acc', 'gene_symbol', 'position', 'wt_aa', 'mut_aa', 'label']
38
+ feature_cols = [c for c in df_features.columns if c not in id_cols]
39
+
40
+ X_features = df_features[feature_cols].values
41
+ X_features = np.nan_to_num(X_features, nan=0.0, posinf=0.0, neginf=0.0)
42
+ y = df_features['label'].values
43
+ proteins = df_features['uniprot_acc'].values
44
+
45
+ print(f" Features classiques: {X_features.shape}")
46
+
47
+ X_emb_combined = np.load(PATHS['embeddings'] / 'embeddings_combined_full.npy')
48
+ X_emb_local = np.load(PATHS['embeddings'] / 'embeddings_local_full.npy')
49
+
50
+ print(f"{X_emb_combined.shape}")
51
+ print(f"{X_emb_local.shape}")
52
+
53
+ print(f"\n {np.sum(y==1)} pathogènes, {np.sum(y==0)} bénins")
54
+ print(f" {len(np.unique(proteins))}")
55
+
56
+
57
+ n_components_combined = 128
58
+ pca_combined = PCA(n_components=n_components_combined, random_state=42)
59
+ X_emb_pca = pca_combined.fit_transform(X_emb_combined)
60
+ print(f" {X_emb_combined.shape[1]} → {n_components_combined}")
61
+ print(f" {pca_combined.explained_variance_ratio_.sum():.2%}")
62
+
63
+
64
+ n_components_local = 64
65
+ pca_local = PCA(n_components=n_components_local, random_state=42)
66
+ X_emb_local_pca = pca_local.fit_transform(X_emb_local)
67
+ print(f" {X_emb_local.shape[1]} → {n_components_local}")
68
+ print(f" {pca_local.explained_variance_ratio_.sum():.2%}")
69
+
70
+ configs = [
71
+ {
72
+ 'name': 'Features classiques',
73
+ 'X': X_features,
74
+ },
75
+ {
76
+ 'name': 'Embeddings ESM-2',
77
+ 'X': X_emb_pca,
78
+ },
79
+ {
80
+ 'name': 'Features + Embeddings',
81
+ 'X': np.concatenate([X_features, X_emb_pca], axis=1),
82
+ },
83
+ {
84
+ 'name': 'Features + Emb. Local',
85
+ 'X': np.concatenate([X_features, X_emb_local_pca], axis=1),
86
+ },
87
+ ]
88
+
89
+ for cfg in configs:
90
+ print(f" {cfg['name']}: {cfg['X'].shape[1]} features")
91
+
92
+ def evaluate_lpocv_fast(X, y, proteins, n_estimators=100, max_depth=4):
93
+
94
+ unique_proteins = np.unique(proteins)
95
+ results = []
96
+
97
+ for protein in tqdm(unique_proteins, desc="LPOCV", leave=False):
98
+ test_mask = proteins == protein
99
+ train_mask = ~test_mask
100
+
101
+ n_test = test_mask.sum()
102
+ if n_test < 2:
103
+ continue
104
+
105
+ X_train, y_train = X[train_mask], y[train_mask]
106
+ X_test, y_test = X[test_mask], y[test_mask]
107
+
108
+ scaler = StandardScaler()
109
+ X_train_s = scaler.fit_transform(X_train)
110
+ X_test_s = scaler.transform(X_test)
111
+
112
+ model = GradientBoostingClassifier(
113
+ n_estimators=n_estimators,
114
+ max_depth=max_depth,
115
+ learning_rate=0.1,
116
+ min_samples_leaf=10,
117
+ subsample=0.8,
118
+ random_state=42
119
+ )
120
+ model.fit(X_train_s, y_train)
121
+
122
+ y_pred = model.predict_proba(X_test_s)[:, 1]
123
+
124
+ for pred, true in zip(y_pred, y_test):
125
+ results.append({'y_true': true, 'y_pred': pred})
126
+
127
+ df_res = pd.DataFrame(results)
128
+
129
+ if len(df_res) > 0 and len(df_res['y_true'].unique()) > 1:
130
+ auc_roc = roc_auc_score(df_res['y_true'], df_res['y_pred'])
131
+ auc_pr = average_precision_score(df_res['y_true'], df_res['y_pred'])
132
+ else:
133
+ auc_roc, auc_pr = 0, 0
134
+
135
+ return auc_roc, auc_pr, df_res
136
+
137
+ results_all = {}
138
+
139
+ for cfg in configs:
140
+ print(f"\n 📊 {cfg['name']}...")
141
+
142
+ auc_roc, auc_pr, df_res = evaluate_lpocv_fast(
143
+ cfg['X'], y, proteins,
144
+ n_estimators=100,
145
+ max_depth=4
146
+ )
147
+
148
+ results_all[cfg['name']] = {
149
+ 'auc_roc': auc_roc,
150
+ 'auc_pr': auc_pr,
151
+ 'predictions': df_res,
152
+ 'n_features': cfg['X'].shape[1],
153
+ }
154
+
155
+ print(f" AUC-ROC: {auc_roc:.4f}")
156
+ print(f" AUC-PR: {auc_pr:.4f}")
157
+
158
+
159
+ best_X = None
160
+ for cfg in configs:
161
+ if cfg['name'] == best_name:
162
+ best_X = cfg['X']
163
+ break
164
+
165
+ print(f" Entraînement: {best_name}...")
166
+
167
+ scaler_final = StandardScaler()
168
+ X_scaled = scaler_final.fit_transform(best_X)
169
+
170
+ model_final = GradientBoostingClassifier(
171
+ n_estimators=300,
172
+ max_depth=5,
173
+ learning_rate=0.05,
174
+ min_samples_leaf=10,
175
+ subsample=0.8,
176
+ random_state=42
177
+ )
178
+
179
+ model_final.fit(X_scaled, y)
180
+
181
+ if 'Features' in best_name:
182
+ importances = model_final.feature_importances_
183
+
184
+ if best_name == 'Features classiques':
185
+ imp_names = feature_cols
186
+ elif best_name == 'Features + Embeddings':
187
+ imp_names = feature_cols + [f'emb_pca_{i}' for i in range(X_emb_pca.shape[1])]
188
+ else:
189
+ imp_names = feature_cols + [f'emb_local_{i}' for i in range(X_emb_local_pca.shape[1])]
190
+
191
+ importance_df = pd.DataFrame({
192
+ 'feature': imp_names,
193
+ 'importance': importances
194
+ }).sort_values('importance', ascending=False)
195
+
196
+ print("\n Top 15 features:")
197
+ for _, row in importance_df.head(15).iterrows():
198
+ print(f" {row['importance']:.4f} {row['feature']}")
199
+
200
+ importance_df.to_csv(PATHS['results'] / 'feature_importances_best_model.csv', index=False)
201
+
202
+ model_data = {
203
+ 'model': model_final,
204
+ 'scaler': scaler_final,
205
+ 'pca_combined': pca_combined if 'Embeddings' in best_name and 'Local' not in best_name else None,
206
+ 'pca_local': pca_local if 'Local' in best_name else None,
207
+ 'feature_cols': feature_cols,
208
+ 'config_name': best_name,
209
+ 'metrics': {
210
+ 'auc_roc_lpocv': results_all[best_name]['auc_roc'],
211
+ 'auc_pr_lpocv': results_all[best_name]['auc_pr'],
212
+ },
213
+ }
214
+
215
+ with open(PATHS['models'] / 'model_best.pkl', 'wb') as f:
216
+ pickle.dump(model_data, f)
217
+
218
+ df_comparison.to_csv(PATHS['results'] / 'comparison_features_embeddings.csv', index=False)
scripts/phase1_freeze_and_classical_features.py.py ADDED
@@ -0,0 +1,308 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Untitled17.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
8
+ """
9
+
10
+ import pandas as pd
11
+ import numpy as np
12
+ from pathlib import Path
13
+ from tqdm import tqdm
14
+ from datetime import datetime
15
+ import hashlib
16
+ import json
17
+
18
+
19
+ PATHS = {
20
+ 'data_processed': BASE_PATH / 'data' / 'processed',
21
+ 'data_frozen': BASE_PATH / 'data' / 'frozen',
22
+ 'features': BASE_PATH / 'features',
23
+ }
24
+
25
+ for path in PATHS.values():
26
+ path.mkdir(parents=True, exist_ok=True)
27
+
28
# Per-residue physicochemical properties used for substitution deltas:
#   hydro    — hydropathy (values match the Kyte–Doolittle scale)
#   charge   — net side-chain charge near pH 7 (His counted as +0.5)
#   volume   — residue volume in Å³
#   disorder — disorder propensity (positive promotes disorder);
#              NOTE(review): exact scale source not stated — TODO confirm
#   aromatic — 1 if the side chain carries an aromatic ring
AA_PROPERTIES = {
    'A': {'hydro': 1.8, 'charge': 0, 'volume': 88.6, 'disorder': 0.06, 'aromatic': 0},
    'R': {'hydro': -4.5, 'charge': 1, 'volume': 173.4, 'disorder': 0.18, 'aromatic': 0},
    'N': {'hydro': -3.5, 'charge': 0, 'volume': 114.1, 'disorder': 0.14, 'aromatic': 0},
    'D': {'hydro': -3.5, 'charge': -1, 'volume': 111.1, 'disorder': 0.19, 'aromatic': 0},
    'C': {'hydro': 2.5, 'charge': 0, 'volume': 108.5, 'disorder': -0.02, 'aromatic': 0},
    'Q': {'hydro': -3.5, 'charge': 0, 'volume': 143.8, 'disorder': 0.16, 'aromatic': 0},
    'E': {'hydro': -3.5, 'charge': -1, 'volume': 138.4, 'disorder': 0.20, 'aromatic': 0},
    'G': {'hydro': -0.4, 'charge': 0, 'volume': 60.1, 'disorder': 0.17, 'aromatic': 0},
    'H': {'hydro': -3.2, 'charge': 0.5, 'volume': 153.2, 'disorder': 0.10, 'aromatic': 1},
    'I': {'hydro': 4.5, 'charge': 0, 'volume': 166.7, 'disorder': -0.49, 'aromatic': 0},
    'L': {'hydro': 3.8, 'charge': 0, 'volume': 166.7, 'disorder': -0.37, 'aromatic': 0},
    'K': {'hydro': -3.9, 'charge': 1, 'volume': 168.6, 'disorder': 0.21, 'aromatic': 0},
    'M': {'hydro': 1.9, 'charge': 0, 'volume': 162.9, 'disorder': -0.23, 'aromatic': 0},
    'F': {'hydro': 2.8, 'charge': 0, 'volume': 189.9, 'disorder': -0.41, 'aromatic': 1},
    'P': {'hydro': -1.6, 'charge': 0, 'volume': 112.7, 'disorder': 0.41, 'aromatic': 0},
    'S': {'hydro': -0.8, 'charge': 0, 'volume': 89.0, 'disorder': 0.13, 'aromatic': 0},
    'T': {'hydro': -0.7, 'charge': 0, 'volume': 116.1, 'disorder': 0.04, 'aromatic': 0},
    'W': {'hydro': -0.9, 'charge': 0, 'volume': 227.8, 'disorder': -0.35, 'aromatic': 1},
    'Y': {'hydro': -1.3, 'charge': 0, 'volume': 193.6, 'disorder': -0.26, 'aromatic': 1},
    'V': {'hydro': 4.2, 'charge': 0, 'volume': 140.0, 'disorder': -0.38, 'aromatic': 0},
}
50
+
51
+
52
+
53
# ---- Load the full mutation dataset and the strict mitochondrial subset ---
df_full = pd.read_parquet(PATHS['data_processed'] / 'mutations_dataset_final.parquet')
print(f" Dataset complet: {len(df_full):,} mutations")

# Prefer a precomputed strict subset; otherwise rebuild it from a curated
# gene list of nuclear-encoded mitochondrial disease genes (OXPHOS subunits
# and assembly factors, mtDNA maintenance, mt-tRNA synthetases, import
# machinery, CoQ biosynthesis, carriers, ...).
mito_strict_file = PATHS['data_processed'] / 'mutations_dataset_mito_strict.parquet'
if mito_strict_file.exists():
    df_strict = pd.read_parquet(mito_strict_file)
else:
    STRICT_MITO_GENES = {
        'OPA1', 'MFN1', 'MFN2', 'DNM1L', 'AFG3L2', 'SPG7', 'LONP1', 'CLPP', 'YME1L1',
        'NDUFAF1', 'NDUFAF2', 'NDUFAF3', 'NDUFAF4', 'NDUFAF5', 'NDUFAF6', 'NDUFAF7',
        'NUBPL', 'ACAD9', 'TIMMDC1', 'FOXRED1',
        'NDUFS1', 'NDUFS2', 'NDUFS3', 'NDUFS4', 'NDUFS6', 'NDUFS7', 'NDUFS8',
        'NDUFV1', 'NDUFV2', 'NDUFA1', 'NDUFA2', 'NDUFA9', 'NDUFA10', 'NDUFA11', 'NDUFA12', 'NDUFA13',
        'SDHA', 'SDHB', 'SDHC', 'SDHD', 'SDHAF1', 'SDHAF2',
        'BCS1L', 'TTC19', 'UQCRB', 'UQCRQ', 'UQCRC2', 'CYC1',
        'SURF1', 'SCO1', 'SCO2', 'COX10', 'COX14', 'COX15', 'COX20',
        'COA5', 'COA6', 'COA7', 'PET100', 'COX4I1', 'COX6A1', 'COX6B1', 'COX7B', 'COX8A',
        'ATP5F1A', 'ATP5F1D', 'ATP5F1E', 'TMEM70', 'ATPAF2',
        'TIMM50', 'TIMM8A', 'DNAJC19', 'AGK', 'TOMM20', 'TOMM40',
        'CHCHD2', 'CHCHD10', 'CHCHD4', 'AIFM1', 'COX17',
        'HSPA9', 'HSPD1', 'HSPE1', 'CLPB',
        'AARS2', 'DARS2', 'EARS2', 'FARS2', 'HARS2', 'IARS2', 'LARS2', 'MARS2',
        'NARS2', 'RARS2', 'SARS2', 'TARS2', 'VARS2', 'YARS2',
        'GFM1', 'TSFM', 'TUFM', 'C12orf65', 'RMND1', 'GTPBP3', 'MTO1', 'TRMU',
        'POLG', 'POLG2', 'TWNK', 'TFAM', 'RRM2B', 'MPV17', 'DGUOK', 'TK2',
        'SUCLA2', 'SUCLG1', 'FBXL4',
        'PDHA1', 'PDHB', 'PDHX', 'DLD', 'DLAT',
        'PC', 'PCCA', 'PCCB', 'MUT', 'MMAA', 'MMAB', 'MMACHC',
        'LIAS', 'LIPT1', 'BOLA3', 'NFU1', 'ISCA1', 'ISCA2', 'IBA57', 'GLRX5', 'FDXR',
        'COQ2', 'COQ4', 'COQ6', 'COQ7', 'COQ8A', 'COQ9', 'PDSS1', 'PDSS2',
        'SLC25A4', 'SLC25A3', 'SLC25A12', 'SLC25A13', 'SLC25A19', 'SLC25A22',
        'TAZ', 'SERAC1', 'LRPPRC', 'TACO1', 'ELAC2', 'TRNT1', 'PNPT1',
    }
    df_strict = df_full[df_full['gene_symbol'].isin(STRICT_MITO_GENES)].copy()
87
+
88
+
89
def compute_hash(df):
    """Return the MD5 hex digest of the dataframe's JSON serialization.

    Used as a lightweight integrity fingerprint for the frozen datasets
    (not for security purposes).
    """
    serialized = df.to_json().encode()
    return hashlib.md5(serialized).hexdigest()
93
+
94
# Freeze manifest: sizes, class balance and content hashes of both dataset
# variants, so later pipeline stages can verify labels were not modified.
freeze_metadata = {
    'freeze_date': datetime.now().isoformat(),
    'freeze_version': '1.0',
    'datasets': {
        'full': {
            'filename': 'mutations_dataset_final_FROZEN.parquet',
            'n_mutations': len(df_full),
            'n_pathogenic': int((df_full['label'] == 1).sum()),
            'n_benign': int((df_full['label'] == 0).sum()),
            'n_genes': int(df_full['gene_symbol'].nunique()),
            'hash': compute_hash(df_full),
        },
        'mito_strict': {
            'filename': 'mutations_dataset_mito_strict_FROZEN.parquet',
            'n_mutations': len(df_strict),
            'n_pathogenic': int((df_strict['label'] == 1).sum()),
            'n_benign': int((df_strict['label'] == 0).sum()),
            'n_genes': int(df_strict['gene_symbol'].nunique()),
            'hash': compute_hash(df_strict),
        }
    },
    'note': 'FROZEN - DO NOT MODIFY LABELS AFTER THIS POINT'
}

# Write the frozen copies alongside (not over) the working datasets.
# NOTE(review): freeze_metadata itself is not written to disk in this span —
# confirm it is persisted elsewhere if the hashes are meant to be checked later.
df_full.to_parquet(PATHS['data_frozen'] / 'mutations_dataset_final_FROZEN.parquet')
df_strict.to_parquet(PATHS['data_frozen'] / 'mutations_dataset_mito_strict_FROZEN.parquet')
120
+
121
# ---- Protein sequences -----------------------------------------------------
# Prefer the preprocessed UniProt parquet; otherwise fall back to a raw
# UniProt TSV export (different column names: 'Entry' / 'Sequence').
uniprot_file = PATHS['data_processed'].parent / 'raw' / 'uniprot_human_reviewed.parquet'

if uniprot_file.exists():
    df_uniprot = pd.read_parquet(uniprot_file)
    seq_dict = dict(zip(df_uniprot['accession'], df_uniprot['sequence']))
else:
    import gzip
    # NOTE(review): the fallback path is an empty string, so gzip.open will
    # fail here — fill in the actual .tsv.gz location before relying on it.
    uniprot_gz = Path("")
    with gzip.open(uniprot_gz, 'rt') as f:
        df_uniprot = pd.read_csv(f, sep='\t', low_memory=False)
    seq_dict = dict(zip(df_uniprot['Entry'], df_uniprot['Sequence']))
132
+
133
+
134
+
135
def extract_classical_features(row, seq_dict, window=15):
    """Extract classical IDP features for one missense mutation.

    Feature groups (~45 columns):
    - substitution deltas (hydrophobicity, charge, volume, disorder, aromatic)
    - local sequence context (window of +/- ``window`` residues)
    - position within the protein
    - whole-protein composition
    - biological indicator flags and composite heuristic scores

    Parameters
    ----------
    row : mapping with 'uniprot_acc', 'position', 'wt_aa', 'mut_aa'.
    seq_dict : dict mapping UniProt accession -> protein sequence.
    window : half-width of the local context window.

    Returns
    -------
    dict of feature name -> value, or None when the sequence is missing or
    the position falls outside it.
    """

    acc = row['uniprot_acc']
    pos = row['position']
    wt = row['wt_aa']
    mut = row['mut_aa']

    seq = seq_dict.get(acc, '')

    features = {}

    # NOTE(review): pos is used directly as a 0-based index below; if the
    # dataset stores 1-based (HGVS-style) positions this is off by one, and
    # `pos >= len(seq)` rejects the last residue — confirm upstream convention.
    if not seq or pos >= len(seq):
        return None

    wt_props = AA_PROPERTIES.get(wt, {})
    mut_props = AA_PROPERTIES.get(mut, {})

    # --- Substitution deltas (mutant minus wild-type) ---
    features['delta_hydrophobicity'] = mut_props.get('hydro', 0) - wt_props.get('hydro', 0)
    features['delta_charge'] = mut_props.get('charge', 0) - wt_props.get('charge', 0)
    features['delta_volume'] = mut_props.get('volume', 0) - wt_props.get('volume', 0)
    features['delta_disorder_propensity'] = mut_props.get('disorder', 0) - wt_props.get('disorder', 0)
    features['delta_aromatic'] = mut_props.get('aromatic', 0) - wt_props.get('aromatic', 0)

    features['abs_delta_hydro'] = abs(features['delta_hydrophobicity'])
    features['abs_delta_charge'] = abs(features['delta_charge'])
    features['abs_delta_volume'] = abs(features['delta_volume'])

    # --- Local context window (clipped at the sequence ends) ---
    start = max(0, pos - window)
    end = min(len(seq), pos + window + 1)
    local_seq = seq[start:end]

    if len(local_seq) > 0:
        features['local_hydro_mean'] = np.mean([AA_PROPERTIES.get(aa, {}).get('hydro', 0) for aa in local_seq])
        features['local_charge_mean'] = np.mean([AA_PROPERTIES.get(aa, {}).get('charge', 0) for aa in local_seq])
        features['local_disorder_mean'] = np.mean([AA_PROPERTIES.get(aa, {}).get('disorder', 0) for aa in local_seq])

        features['local_charged_fraction'] = sum(1 for aa in local_seq if aa in 'RDEHK') / len(local_seq)
        features['local_aromatic_fraction'] = sum(1 for aa in local_seq if aa in 'FWY') / len(local_seq)
        features['local_proline_fraction'] = local_seq.count('P') / len(local_seq)
        features['local_glycine_fraction'] = local_seq.count('G') / len(local_seq)
        features['local_cysteine_fraction'] = local_seq.count('C') / len(local_seq)

        # Disorder- vs order-promoting residue classes.
        disorder_promoting = set('AEGRQSKP')
        order_promoting = set('WFYILMVC')
        features['local_disorder_promoting'] = sum(1 for aa in local_seq if aa in disorder_promoting) / len(local_seq)
        features['local_order_promoting'] = sum(1 for aa in local_seq if aa in order_promoting) / len(local_seq)
    else:
        # Unreachable in practice (pos < len(seq) guarantees a non-empty
        # window), kept as a defensive default.
        for key in ['local_hydro_mean', 'local_charge_mean', 'local_disorder_mean',
                    'local_charged_fraction', 'local_aromatic_fraction', 'local_proline_fraction',
                    'local_glycine_fraction', 'local_cysteine_fraction',
                    'local_disorder_promoting', 'local_order_promoting']:
            features[key] = 0

    # --- Positional features ---
    prot_len = len(seq)

    features['position_absolute'] = pos
    features['position_normalized'] = pos / prot_len if prot_len > 0 else 0
    features['protein_length'] = prot_len

    features['is_n_terminal'] = 1 if pos < 50 else 0
    features['is_c_terminal'] = 1 if pos > prot_len - 50 else 0
    features['distance_to_n_term'] = pos
    features['distance_to_c_term'] = prot_len - pos - 1

    # --- Whole-protein composition ---
    features['protein_cysteine_count'] = seq.count('C')
    features['protein_cysteine_fraction'] = seq.count('C') / prot_len if prot_len > 0 else 0
    features['protein_charged_fraction'] = sum(1 for aa in seq if aa in 'RDEHK') / prot_len if prot_len > 0 else 0
    features['protein_disorder_mean'] = np.mean([AA_PROPERTIES.get(aa, {}).get('disorder', 0) for aa in seq])

    # --- Cysteine / redox indicators ---
    features['cysteine_gained'] = 1 if mut == 'C' else 0
    features['cysteine_lost'] = 1 if wt == 'C' else 0
    features['cysteine_change'] = features['cysteine_gained'] - features['cysteine_lost']

    # Other cysteines near the site (excluding a mutated-away WT cysteine).
    features['nearby_cysteine_count'] = local_seq.count('C') - (1 if wt == 'C' else 0)
    features['cysteine_in_cys_rich_region'] = 1 if features['local_cysteine_fraction'] > 0.05 else 0

    # --- Charge-change indicators ---
    features['charge_introducing'] = 1 if wt_props.get('charge', 0) == 0 and mut_props.get('charge', 0) != 0 else 0
    features['charge_removing'] = 1 if wt_props.get('charge', 0) != 0 and mut_props.get('charge', 0) == 0 else 0
    features['charge_reversing'] = 1 if wt_props.get('charge', 0) * mut_props.get('charge', 0) < 0 else 0

    # --- Proline / glycine indicators ---
    features['proline_introduced'] = 1 if mut == 'P' and wt != 'P' else 0
    features['proline_removed'] = 1 if wt == 'P' and mut != 'P' else 0
    features['glycine_introduced'] = 1 if mut == 'G' and wt != 'G' else 0
    features['glycine_removed'] = 1 if wt == 'G' and mut != 'G' else 0

    # --- Composite heuristic scores (hand-tuned weights) ---
    features['idp_disruption_score'] = (
        abs(features['delta_disorder_propensity']) * 2 +
        abs(features['delta_charge']) * 1.5 +
        features['proline_introduced'] * 2 +
        features['proline_removed'] * 1
    )

    features['ros_vulnerability_score'] = (
        features['cysteine_lost'] * 3 +
        features['cysteine_gained'] * 1 +
        features['cysteine_in_cys_rich_region'] * 2 +
        (1 if features['protein_cysteine_fraction'] > 0.03 else 0) * 1
    )

    features['import_disruption_score'] = (
        features['is_n_terminal'] * 2 +
        features['charge_reversing'] * (2 if pos < 50 else 0) +
        abs(features['delta_hydrophobicity']) * (1 if pos < 30 else 0)
    )

    return features
260
+
261
+
262
def _build_feature_frame(df, seq_dict, desc):
    """Run extract_classical_features over every row of ``df`` and assemble
    the results (plus identifier columns) into a DataFrame.

    Returns (features_df, n_failed). Factors out the extraction loop that
    was previously duplicated verbatim for the full and strict datasets.
    """
    rows = []
    failed = 0
    for idx, row in tqdm(df.iterrows(), total=len(df), desc=desc):
        feats = extract_classical_features(row, seq_dict)
        if feats:
            # Carry the identifiers/label through so downstream scripts can
            # separate them from the feature columns.
            feats['mutation_idx'] = idx
            feats['uniprot_acc'] = row['uniprot_acc']
            feats['gene_symbol'] = row['gene_symbol']
            feats['position'] = row['position']
            feats['wt_aa'] = row['wt_aa']
            feats['mut_aa'] = row['mut_aa']
            feats['label'] = row['label']
            rows.append(feats)
        else:
            failed += 1
    return pd.DataFrame(rows), failed


# Full dataset.
df_features_full, failed = _build_feature_frame(df_full, seq_dict, "Features")

print(f"\n Features extraites: {len(df_features_full):,}")
print(f" Échecs: {failed}")

# Strict mitochondrial subset.
df_features_strict, _ = _build_feature_frame(df_strict, seq_dict, "Features strict")

df_features_full.to_parquet(PATHS['features'] / 'features_classical_full.parquet')
df_features_strict.to_parquet(PATHS['features'] / 'features_classical_mito_strict.parquet')

# Everything that is not an identifier/label column is a model feature.
feature_cols = [c for c in df_features_full.columns if c not in
                ['mutation_idx', 'uniprot_acc', 'gene_symbol', 'position', 'wt_aa', 'mut_aa', 'label']]
scripts/train_baseline_classical_model.py.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Untitled17.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
8
+ """
9
+
10
import warnings
from collections import Counter
from math import log2
from pathlib import Path

import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_auc_score, average_precision_score, classification_report
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm

warnings.filterwarnings('ignore')
20
+
21
# BASE_PATH was defined by the originating Colab notebook environment and
# is a NameError in this standalone script; fall back to the current
# directory so the script still runs when launched directly.
try:
    BASE_PATH
except NameError:
    BASE_PATH = Path('.')

# Project directory layout used throughout the script.
PATHS = {
    'data_frozen': BASE_PATH / 'data' / 'frozen',
    'features': BASE_PATH / 'features',
    'models': BASE_PATH / 'models',
    'results': BASE_PATH / 'results',
}

# Create every directory up front so later writes cannot fail on a
# missing parent.
for path in PATHS.values():
    path.mkdir(parents=True, exist_ok=True)
30
+
31
def shannon_entropy(seq):
    """Return the Shannon entropy (in bits) of a sequence.

    Each distinct symbol's relative frequency is used as its
    probability.  An empty or None sequence yields 0.0.
    """
    # `not seq` already covers both None and the empty sequence; the
    # original extra `len(seq) == 0` test was redundant.
    if not seq:
        return 0.0
    # One O(n) counting pass instead of one seq.count() scan per
    # distinct symbol (the original was O(n * k)).
    total = len(seq)
    return -sum((c / total) * log2(c / total) for c in Counter(seq).values())
37
+
38
# Load the previously extracted classical feature tables: the full
# dataset and the strict mitochondrial subset.
_features_dir = PATHS['features']
df_features_full = pd.read_parquet(_features_dir / 'features_classical_full.parquet')
df_features_strict = pd.read_parquet(_features_dir / 'features_classical_mito_strict.parquet')

print(f" Features chargées (full): {len(df_features_full):,}")
print(f" Features chargées (strict): {len(df_features_strict):,}")

# Build an accession -> protein sequence lookup from the UniProt dump.
uniprot_file = BASE_PATH / 'data' / 'raw' / 'uniprot_human_reviewed.parquet'
df_uniprot = pd.read_parquet(uniprot_file)
seq_dict = {acc: seq for acc, seq in zip(df_uniprot['accession'], df_uniprot['sequence'])}

print(f" Séquences: {len(seq_dict):,}")
49
+
50
def add_entropy_feature(df, seq_dict, window=15):
    """Add a 'local_sequence_entropy' column to *df* (modified in place).

    For each mutation row the Shannon entropy of the sequence window
    [position - window, position + window] is computed on the protein
    sequence found via the UniProt accession.  Rows whose accession is
    unknown, or whose position falls outside the sequence, get 0.0.

    NOTE(review): 'position' is used as a 0-based index into the
    sequence here; protein variant positions are conventionally
    1-based — confirm against the feature-extraction step.
    """
    entropies = []

    # Iterate the two needed columns directly instead of iterrows():
    # avoids materialising a full Series per row, which dominated the
    # cost of the original loop.
    pairs = zip(df['uniprot_acc'], df['position'])
    for acc, pos in tqdm(pairs, total=len(df), desc="Entropie"):
        seq = seq_dict.get(acc, '')
        if seq and 0 <= pos < len(seq):
            start = max(0, pos - window)
            end = min(len(seq), pos + window + 1)
            entropies.append(shannon_entropy(seq[start:end]))
        else:
            entropies.append(0.0)

    df['local_sequence_entropy'] = entropies
    return df
68
+
69
+ print("\n Ajout de l'entropie locale...")
70
+ df_features_full = add_entropy_feature(df_features_full, seq_dict)
71
+ df_features_strict = add_entropy_feature(df_features_strict, seq_dict)
72
+
73
+ df_features_full.to_parquet(PATHS['features'] / 'features_classical_full.parquet')
74
+ df_features_strict.to_parquet(PATHS['features'] / 'features_classical_mito_strict.parquet')
75
+
76
+
77
+
78
+
79
# Identifier columns carried through the feature tables; every other
# column is a numeric model feature.
id_cols = ['mutation_idx', 'uniprot_acc', 'gene_symbol', 'position', 'wt_aa', 'mut_aa', 'label']
feature_cols = [c for c in df_features_full.columns if c not in id_cols]

print(f" Features: {len(feature_cols)}")

# Design matrices and targets for the full dataset and the strict subset.
X_full = df_features_full[feature_cols].values
y_full = df_features_full['label'].values

X_strict = df_features_strict[feature_cols].values
y_strict = df_features_strict['label'].values

for _X, _y in ((X_full, y_full), (X_strict, y_strict)):
    print(f" X shape: {_X.shape}")
    print(f" y: {np.sum(_y==1)} pathogènes, {np.sum(_y==0)} bénins")

# Sanitise non-finite values so the tree ensemble never sees NaN/inf.
X_full = np.nan_to_num(X_full, nan=0.0, posinf=0.0, neginf=0.0)
X_strict = np.nan_to_num(X_strict, nan=0.0, posinf=0.0, neginf=0.0)
98
+
99
+
100
+
101
from sklearn.model_selection import train_test_split

# Stratified 80/20 hold-out split of the full dataset.
X_train, X_test, y_train, y_test = train_test_split(
    X_full, y_full, test_size=0.2, random_state=42, stratify=y_full
)

print(f" Train: {len(X_train)} ({np.sum(y_train==1)} patho)")
print(f" Test: {len(X_test)} ({np.sum(y_test==1)} patho)")

# Fit the scaler on the training split only, then apply it to the test
# split to avoid leakage.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Baseline gradient-boosting classifier.
_gb_params = dict(
    n_estimators=300,
    max_depth=5,
    learning_rate=0.05,
    min_samples_leaf=10,
    subsample=0.8,
    random_state=42,
    verbose=0,
)
model = GradientBoostingClassifier(**_gb_params)
model.fit(X_train_scaled, y_train)

y_pred_proba = model.predict_proba(X_test_scaled)[:, 1]
y_pred = model.predict(X_test_scaled)

auc_roc = roc_auc_score(y_test, y_pred_proba)
auc_pr = average_precision_score(y_test, y_pred_proba)

print(f" AUC-ROC: {auc_roc:.4f}")
print(f" AUC-PR: {auc_pr:.4f}")

print(classification_report(y_test, y_pred, target_names=['Bénin', 'Pathogène']))
136
+
137
+
138
# ---- Leave-Protein-Out Cross-Validation (LPOCV) ----
# Each protein's mutations are held out in turn so evaluation never
# mixes train/test variants of the same protein.
proteins = df_features_full['uniprot_acc'].unique()
print(f" Protéines uniques: {len(proteins)}")

lpocv_results = []
proteins_evaluated = 0

for protein in tqdm(proteins, desc="LPOCV"):
    test_mask = df_features_full['uniprot_acc'] == protein
    train_mask = ~test_mask

    # Require at least two held-out mutations for the fold to be useful.
    if test_mask.sum() < 2:
        continue

    X_train_lpo = X_full[train_mask]
    y_train_lpo = y_full[train_mask]
    X_test_lpo = X_full[test_mask]
    y_test_lpo = y_full[test_mask]

    # NOTE(review): the original code tested single-class held-out sets
    # with `if len(np.unique(y_test_lpo)) < 2: pass` — a no-op, so such
    # proteins were kept.  That is fine because predictions are pooled
    # across all proteins before AUC is computed; the dead check has
    # been removed without changing behaviour.

    # Per-fold scaling, fitted on the training portion only.
    scaler_lpo = StandardScaler()
    X_train_lpo_scaled = scaler_lpo.fit_transform(X_train_lpo)
    X_test_lpo_scaled = scaler_lpo.transform(X_test_lpo)

    # Lighter model than the final one: this fit runs once per protein.
    model_lpo = GradientBoostingClassifier(
        n_estimators=100,
        max_depth=4,
        learning_rate=0.1,
        min_samples_leaf=10,
        random_state=42,
        verbose=0,
    )
    model_lpo.fit(X_train_lpo_scaled, y_train_lpo)

    y_pred_lpo = model_lpo.predict_proba(X_test_lpo_scaled)[:, 1]

    # The original enumerate() index was unused; iterate pairs directly.
    for pred, true in zip(y_pred_lpo, y_test_lpo):
        lpocv_results.append({
            'protein': protein,
            'y_true': true,
            'y_pred_proba': pred,
        })

    proteins_evaluated += 1

df_lpocv = pd.DataFrame(lpocv_results)
187
+
188
+
189
# Pooled LPOCV metrics.  AUC is undefined unless both classes appear in
# the pooled predictions; the metrics default to 0 otherwise.
auc_roc_lpocv = 0
auc_pr_lpocv = 0

if len(df_lpocv) > 0 and len(df_lpocv['y_true'].unique()) > 1:
    _y_true = df_lpocv['y_true']
    _y_proba = df_lpocv['y_pred_proba']
    auc_roc_lpocv = roc_auc_score(_y_true, _y_proba)
    auc_pr_lpocv = average_precision_score(_y_true, _y_proba)

    print(f"\n 📊 RÉSULTATS LPOCV:")
    print(f" AUC-ROC: {auc_roc_lpocv:.4f}")
    print(f" AUC-PR: {auc_pr_lpocv:.4f}")
200
+
201
# Retrain on ALL data for the final deliverable model (unbiased
# evaluation was already done above via the hold-out split and LPOCV).
scaler_final = StandardScaler()
X_full_scaled = scaler_final.fit_transform(X_full)

model_final = GradientBoostingClassifier(
    n_estimators=300,
    max_depth=5,
    learning_rate=0.05,
    min_samples_leaf=10,
    subsample=0.8,
    random_state=42,
    verbose=0,
)
model_final.fit(X_full_scaled, y_full)

# Rank features by importance and show the top 20.
importances = model_final.feature_importances_
importance_df = (
    pd.DataFrame({'feature': feature_cols, 'importance': importances})
    .sort_values('importance', ascending=False)
)
for entry in importance_df.head(20).itertuples(index=False):
    print(f" {entry.importance:.4f} {entry.feature}")

importance_df.to_csv(PATHS['results'] / 'feature_importances_classical.csv', index=False)
225
+
226
+
227
import pickle

# Bundle the final model with its scaler, feature list and headline
# metrics so downstream consumers can load a single artefact.
_metrics = {
    'auc_roc_split': auc_roc,
    'auc_pr_split': auc_pr,
    'auc_roc_lpocv': auc_roc_lpocv,
    'auc_pr_lpocv': auc_pr_lpocv,
}
model_data = {
    'model': model_final,
    'scaler': scaler_final,
    'feature_cols': feature_cols,
    'metrics': _metrics,
    'n_samples': len(X_full),
    'n_features': len(feature_cols),
}

with open(PATHS['models'] / 'model_classical_baseline.pkl', 'wb') as f:
    pickle.dump(model_data, f)

# Persist the per-mutation LPOCV predictions for later analysis.
df_lpocv.to_parquet(PATHS['results'] / 'lpocv_predictions.parquet')