"""Untitled17.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""
|
|
| import pandas as pd |
| import numpy as np |
| from pathlib import Path |
| from tqdm import tqdm |
| from datetime import datetime |
| import hashlib |
| import json |
|
|
|
|
# NOTE(review): BASE_PATH was referenced below but never defined anywhere in
# this file (it was presumably set in an earlier, deleted notebook cell),
# which made this block crash with a NameError. Define it explicitly here;
# adjust to the real project root as needed.
BASE_PATH = Path('.')

# Canonical output locations for the pipeline.
PATHS = {
    'data_processed': BASE_PATH / 'data' / 'processed',
    'data_frozen': BASE_PATH / 'data' / 'frozen',
    'features': BASE_PATH / 'features',
}

# Create every directory up-front so later writes cannot fail on a
# missing parent (idempotent thanks to exist_ok=True).
for path in PATHS.values():
    path.mkdir(parents=True, exist_ok=True)
|
|
# Per-residue physico-chemical properties used by feature extraction.
# Row layout: (residue, hydropathy, formal charge, side-chain volume,
# disorder propensity, aromatic flag).
_AA_TABLE = [
    ('A', 1.8, 0, 88.6, 0.06, 0),
    ('R', -4.5, 1, 173.4, 0.18, 0),
    ('N', -3.5, 0, 114.1, 0.14, 0),
    ('D', -3.5, -1, 111.1, 0.19, 0),
    ('C', 2.5, 0, 108.5, -0.02, 0),
    ('Q', -3.5, 0, 143.8, 0.16, 0),
    ('E', -3.5, -1, 138.4, 0.20, 0),
    ('G', -0.4, 0, 60.1, 0.17, 0),
    ('H', -3.2, 0.5, 153.2, 0.10, 1),
    ('I', 4.5, 0, 166.7, -0.49, 0),
    ('L', 3.8, 0, 166.7, -0.37, 0),
    ('K', -3.9, 1, 168.6, 0.21, 0),
    ('M', 1.9, 0, 162.9, -0.23, 0),
    ('F', 2.8, 0, 189.9, -0.41, 1),
    ('P', -1.6, 0, 112.7, 0.41, 0),
    ('S', -0.8, 0, 89.0, 0.13, 0),
    ('T', -0.7, 0, 116.1, 0.04, 0),
    ('W', -0.9, 0, 227.8, -0.35, 1),
    ('Y', -1.3, 0, 193.6, -0.26, 1),
    ('V', 4.2, 0, 140.0, -0.38, 0),
]

# Expand the table into the {aa: {prop: value}} mapping the rest of the
# pipeline expects.
AA_PROPERTIES = {
    aa: {'hydro': h, 'charge': c, 'volume': v, 'disorder': d, 'aromatic': ar}
    for aa, h, c, v, d, ar in _AA_TABLE
}
|
|
|
|
|
|
# Load the full (all-gene) mutation dataset produced by the upstream step.
df_full = pd.read_parquet(PATHS['data_processed'] / 'mutations_dataset_final.parquet')
print(f" Dataset complet: {len(df_full):,} mutations")

# Strict mitochondrial subset: use a precomputed file when available,
# otherwise rebuild it from a curated list of gene symbols.
mito_strict_file = PATHS['data_processed'] / 'mutations_dataset_mito_strict.parquet'
if mito_strict_file.exists():
    df_strict = pd.read_parquet(mito_strict_file)
else:
    # Curated gene list; groups appear to be organised by functional
    # category (dynamics/proteases, CI assembly, OXPHOS subunits, import,
    # chaperones, mt-aaRS, translation, mtDNA maintenance, PDH, cofactor
    # biosynthesis, carriers) — TODO confirm the curation source.
    STRICT_MITO_GENES = {
        'OPA1', 'MFN1', 'MFN2', 'DNM1L', 'AFG3L2', 'SPG7', 'LONP1', 'CLPP', 'YME1L1',
        'NDUFAF1', 'NDUFAF2', 'NDUFAF3', 'NDUFAF4', 'NDUFAF5', 'NDUFAF6', 'NDUFAF7',
        'NUBPL', 'ACAD9', 'TIMMDC1', 'FOXRED1',
        'NDUFS1', 'NDUFS2', 'NDUFS3', 'NDUFS4', 'NDUFS6', 'NDUFS7', 'NDUFS8',
        'NDUFV1', 'NDUFV2', 'NDUFA1', 'NDUFA2', 'NDUFA9', 'NDUFA10', 'NDUFA11', 'NDUFA12', 'NDUFA13',
        'SDHA', 'SDHB', 'SDHC', 'SDHD', 'SDHAF1', 'SDHAF2',
        'BCS1L', 'TTC19', 'UQCRB', 'UQCRQ', 'UQCRC2', 'CYC1',
        'SURF1', 'SCO1', 'SCO2', 'COX10', 'COX14', 'COX15', 'COX20',
        'COA5', 'COA6', 'COA7', 'PET100', 'COX4I1', 'COX6A1', 'COX6B1', 'COX7B', 'COX8A',
        'ATP5F1A', 'ATP5F1D', 'ATP5F1E', 'TMEM70', 'ATPAF2',
        'TIMM50', 'TIMM8A', 'DNAJC19', 'AGK', 'TOMM20', 'TOMM40',
        'CHCHD2', 'CHCHD10', 'CHCHD4', 'AIFM1', 'COX17',
        'HSPA9', 'HSPD1', 'HSPE1', 'CLPB',
        'AARS2', 'DARS2', 'EARS2', 'FARS2', 'HARS2', 'IARS2', 'LARS2', 'MARS2',
        'NARS2', 'RARS2', 'SARS2', 'TARS2', 'VARS2', 'YARS2',
        'GFM1', 'TSFM', 'TUFM', 'C12orf65', 'RMND1', 'GTPBP3', 'MTO1', 'TRMU',
        'POLG', 'POLG2', 'TWNK', 'TFAM', 'RRM2B', 'MPV17', 'DGUOK', 'TK2',
        'SUCLA2', 'SUCLG1', 'FBXL4',
        'PDHA1', 'PDHB', 'PDHX', 'DLD', 'DLAT',
        'PC', 'PCCA', 'PCCB', 'MUT', 'MMAA', 'MMAB', 'MMACHC',
        'LIAS', 'LIPT1', 'BOLA3', 'NFU1', 'ISCA1', 'ISCA2', 'IBA57', 'GLRX5', 'FDXR',
        'COQ2', 'COQ4', 'COQ6', 'COQ7', 'COQ8A', 'COQ9', 'PDSS1', 'PDSS2',
        'SLC25A4', 'SLC25A3', 'SLC25A12', 'SLC25A13', 'SLC25A19', 'SLC25A22',
        'TAZ', 'SERAC1', 'LRPPRC', 'TACO1', 'ELAC2', 'TRNT1', 'PNPT1',
    }
    # NOTE(review): the rebuilt subset is NOT written back to
    # mito_strict_file, so this fallback branch re-runs on every execution
    # until the parquet is produced elsewhere.
    df_strict = df_full[df_full['gene_symbol'].isin(STRICT_MITO_GENES)].copy()
|
|
|
|
def compute_hash(df):
    """Return the MD5 hex digest of the DataFrame's JSON serialisation.

    Used purely as an integrity fingerprint for the frozen datasets: any
    change in values, column names or row order produces a different hash.
    (MD5 is fine here — this is tamper *detection*, not security.)
    """
    serialised = df.to_json().encode()
    return hashlib.md5(serialised).hexdigest()
|
|
def _dataset_summary(filename, df):
    """Build one freeze-metadata entry (counts + integrity hash) for df."""
    return {
        'filename': filename,
        'n_mutations': len(df),
        'n_pathogenic': int((df['label'] == 1).sum()),
        'n_benign': int((df['label'] == 0).sum()),
        'n_genes': int(df['gene_symbol'].nunique()),
        'hash': compute_hash(df),
    }


# Snapshot metadata describing both frozen datasets at freeze time.
freeze_metadata = {
    'freeze_date': datetime.now().isoformat(),
    'freeze_version': '1.0',
    'datasets': {
        'full': _dataset_summary('mutations_dataset_final_FROZEN.parquet', df_full),
        'mito_strict': _dataset_summary('mutations_dataset_mito_strict_FROZEN.parquet', df_strict),
    },
    'note': 'FROZEN - DO NOT MODIFY LABELS AFTER THIS POINT',
}


# Write the immutable copies alongside the metadata's filenames.
df_full.to_parquet(PATHS['data_frozen'] / 'mutations_dataset_final_FROZEN.parquet')
df_strict.to_parquet(PATHS['data_frozen'] / 'mutations_dataset_mito_strict_FROZEN.parquet')
|
|
# Locate the UniProt reviewed-human table and build the
# accession -> sequence lookup used by feature extraction.
uniprot_file = PATHS['data_processed'].parent / 'raw' / 'uniprot_human_reviewed.parquet'

if uniprot_file.exists():
    df_uniprot = pd.read_parquet(uniprot_file)
    seq_dict = dict(zip(df_uniprot['accession'], df_uniprot['sequence']))
else:
    # Fallback: parse the gzipped UniProt TSV export directly.
    import gzip
    # BUG FIX: the original left this as Path("") — an empty placeholder —
    # so gzip.open crashed with an opaque IsADirectoryError/PermissionError.
    # Keep the placeholder but fail fast with an actionable message.
    uniprot_gz = Path("")  # TODO: point at the downloaded UniProt .tsv.gz
    if not uniprot_gz.is_file():
        raise FileNotFoundError(
            f"UniProt source not found: neither {uniprot_file} nor a "
            f"fallback TSV archive ({str(uniprot_gz)!r}) exists. Set "
            "uniprot_gz to the uniprot_human_reviewed.tsv.gz download."
        )
    with gzip.open(uniprot_gz, 'rt') as f:
        df_uniprot = pd.read_csv(f, sep='\t', low_memory=False)
    # The raw TSV uses UniProt's default column headers.
    seq_dict = dict(zip(df_uniprot['Entry'], df_uniprot['Sequence']))
|
|
|
|
|
|
def extract_classical_features(row, seq_dict, window=15):
    """Extract classical IDP features for one mutation.

    Extracted features (~45):
    - substitution properties (deltas, mutant minus wild-type)
    - local context (window of ±`window` residues around the site)
    - position within the protein
    - local composition
    - biological indicator scores

    Parameters
    ----------
    row : mapping with keys 'uniprot_acc', 'position', 'wt_aa', 'mut_aa'
        One mutation record (e.g. a pandas Series).
    seq_dict : dict
        UniProt accession -> protein sequence (str).
    window : int
        Half-width of the local sequence window.

    Returns
    -------
    dict mapping feature name -> numeric value, or None when the sequence
    is missing or the position falls outside it (callers count those as
    failures).
    """

    acc = row['uniprot_acc']
    # NOTE(review): `pos` is used directly as a 0-based index below
    # (seq[start:end] around pos, `pos >= len(seq)` guard). If the dataset
    # stores 1-based residue numbers this is off by one — confirm the
    # convention upstream; the code never checks seq[pos] == wt.
    pos = row['position']
    wt = row['wt_aa']
    mut = row['mut_aa']

    seq = seq_dict.get(acc, '')

    features = {}

    # Bail out when the sequence is unavailable or the position does not
    # fit inside it.
    if not seq or pos >= len(seq):
        return None

    # Unknown residues (e.g. 'X', 'U') fall back to {}, so every .get()
    # below defaults to 0.
    wt_props = AA_PROPERTIES.get(wt, {})
    mut_props = AA_PROPERTIES.get(mut, {})

    # --- Substitution deltas (mutant minus wild-type) ---
    features['delta_hydrophobicity'] = mut_props.get('hydro', 0) - wt_props.get('hydro', 0)
    features['delta_charge'] = mut_props.get('charge', 0) - wt_props.get('charge', 0)
    features['delta_volume'] = mut_props.get('volume', 0) - wt_props.get('volume', 0)
    features['delta_disorder_propensity'] = mut_props.get('disorder', 0) - wt_props.get('disorder', 0)
    features['delta_aromatic'] = mut_props.get('aromatic', 0) - wt_props.get('aromatic', 0)

    # Magnitudes of the main deltas.
    features['abs_delta_hydro'] = abs(features['delta_hydrophobicity'])
    features['abs_delta_charge'] = abs(features['delta_charge'])
    features['abs_delta_volume'] = abs(features['delta_volume'])

    # --- Local sequence context: window of ±`window` residues, clipped to
    # the sequence boundaries. Includes the mutated residue itself. ---
    start = max(0, pos - window)
    end = min(len(seq), pos + window + 1)
    local_seq = seq[start:end]

    # Given the early return above, local_seq is never empty in practice;
    # the else branch is defensive.
    if len(local_seq) > 0:
        features['local_hydro_mean'] = np.mean([AA_PROPERTIES.get(aa, {}).get('hydro', 0) for aa in local_seq])
        features['local_charge_mean'] = np.mean([AA_PROPERTIES.get(aa, {}).get('charge', 0) for aa in local_seq])
        features['local_disorder_mean'] = np.mean([AA_PROPERTIES.get(aa, {}).get('disorder', 0) for aa in local_seq])

        # Composition fractions over the window.
        features['local_charged_fraction'] = sum(1 for aa in local_seq if aa in 'RDEHK') / len(local_seq)
        features['local_aromatic_fraction'] = sum(1 for aa in local_seq if aa in 'FWY') / len(local_seq)
        features['local_proline_fraction'] = local_seq.count('P') / len(local_seq)
        features['local_glycine_fraction'] = local_seq.count('G') / len(local_seq)
        features['local_cysteine_fraction'] = local_seq.count('C') / len(local_seq)

        # Residue classes commonly used to separate disorder-promoting
        # from order-promoting amino acids — TODO confirm the intended
        # classification source.
        disorder_promoting = set('AEGRQSKP')
        order_promoting = set('WFYILMVC')
        features['local_disorder_promoting'] = sum(1 for aa in local_seq if aa in disorder_promoting) / len(local_seq)
        features['local_order_promoting'] = sum(1 for aa in local_seq if aa in order_promoting) / len(local_seq)
    else:
        # Defensive zero-fill keeps the feature vector the same width.
        for key in ['local_hydro_mean', 'local_charge_mean', 'local_disorder_mean',
                    'local_charged_fraction', 'local_aromatic_fraction', 'local_proline_fraction',
                    'local_glycine_fraction', 'local_cysteine_fraction',
                    'local_disorder_promoting', 'local_order_promoting']:
            features[key] = 0

    # --- Position within the protein ---
    prot_len = len(seq)

    features['position_absolute'] = pos
    # prot_len > 0 is guaranteed by the early return; guard kept as-is.
    features['position_normalized'] = pos / prot_len if prot_len > 0 else 0
    features['protein_length'] = prot_len

    # Fixed 50-residue termini windows.
    features['is_n_terminal'] = 1 if pos < 50 else 0
    features['is_c_terminal'] = 1 if pos > prot_len - 50 else 0
    features['distance_to_n_term'] = pos
    features['distance_to_c_term'] = prot_len - pos - 1

    # --- Whole-protein composition ---
    features['protein_cysteine_count'] = seq.count('C')
    features['protein_cysteine_fraction'] = seq.count('C') / prot_len if prot_len > 0 else 0
    features['protein_charged_fraction'] = sum(1 for aa in seq if aa in 'RDEHK') / prot_len if prot_len > 0 else 0
    features['protein_disorder_mean'] = np.mean([AA_PROPERTIES.get(aa, {}).get('disorder', 0) for aa in seq])

    # --- Cysteine gain/loss indicators ---
    features['cysteine_gained'] = 1 if mut == 'C' else 0
    features['cysteine_lost'] = 1 if wt == 'C' else 0
    features['cysteine_change'] = features['cysteine_gained'] - features['cysteine_lost']

    # Cysteines in the window excluding the mutated site itself.
    features['nearby_cysteine_count'] = local_seq.count('C') - (1 if wt == 'C' else 0)
    features['cysteine_in_cys_rich_region'] = 1 if features['local_cysteine_fraction'] > 0.05 else 0

    # --- Charge-change indicators ---
    features['charge_introducing'] = 1 if wt_props.get('charge', 0) == 0 and mut_props.get('charge', 0) != 0 else 0
    features['charge_removing'] = 1 if wt_props.get('charge', 0) != 0 and mut_props.get('charge', 0) == 0 else 0
    # Negative product means the sign of the charge flipped (H's 0.5
    # counts as positive here).
    features['charge_reversing'] = 1 if wt_props.get('charge', 0) * mut_props.get('charge', 0) < 0 else 0

    # --- Proline / glycine substitutions (structure breakers/flexers) ---
    features['proline_introduced'] = 1 if mut == 'P' and wt != 'P' else 0
    features['proline_removed'] = 1 if wt == 'P' and mut != 'P' else 0
    features['glycine_introduced'] = 1 if mut == 'G' and wt != 'G' else 0
    features['glycine_removed'] = 1 if wt == 'G' and mut != 'G' else 0

    # --- Hand-crafted composite scores (weights are heuristic) ---
    features['idp_disruption_score'] = (
        abs(features['delta_disorder_propensity']) * 2 +
        abs(features['delta_charge']) * 1.5 +
        features['proline_introduced'] * 2 +
        features['proline_removed'] * 1
    )

    # ROS/oxidation vulnerability: heavily weights losing a cysteine.
    features['ros_vulnerability_score'] = (
        features['cysteine_lost'] * 3 +
        features['cysteine_gained'] * 1 +
        features['cysteine_in_cys_rich_region'] * 2 +
        (1 if features['protein_cysteine_fraction'] > 0.03 else 0) * 1
    )

    # Import-disruption proxy: N-terminal changes weighted by position
    # thresholds (presumably targeting-presequence region — TODO confirm).
    features['import_disruption_score'] = (
        features['is_n_terminal'] * 2 +
        features['charge_reversing'] * (2 if pos < 50 else 0) +
        abs(features['delta_hydrophobicity']) * (1 if pos < 30 else 0)
    )

    return features
|
|
|
|
# --- Extract classical features for every mutation in the full dataset ---
features_list = []
failed = 0

for idx, row in tqdm(df_full.iterrows(), total=len(df_full), desc="Features"):
    feats = extract_classical_features(row, seq_dict)
    if feats is None:
        # Missing sequence or out-of-range position.
        failed += 1
        continue
    # Carry identifying metadata alongside the numeric features.
    feats.update(
        mutation_idx=idx,
        uniprot_acc=row['uniprot_acc'],
        gene_symbol=row['gene_symbol'],
        position=row['position'],
        wt_aa=row['wt_aa'],
        mut_aa=row['mut_aa'],
        label=row['label'],
    )
    features_list.append(feats)

df_features_full = pd.DataFrame(features_list)

print(f"\n Features extraites: {len(df_features_full):,}")
print(f" Échecs: {failed}")
|
|
|
|
# --- Same extraction for the strict mitochondrial subset ---
# (Unlike the full loop above, failures are silently skipped, not counted.)
features_list_strict = []

for idx, row in tqdm(df_strict.iterrows(), total=len(df_strict), desc="Features strict"):
    feats = extract_classical_features(row, seq_dict)
    if feats is None:
        continue
    # Attach the same identifying metadata columns as the full dataset.
    feats.update(
        mutation_idx=idx,
        uniprot_acc=row['uniprot_acc'],
        gene_symbol=row['gene_symbol'],
        position=row['position'],
        wt_aa=row['wt_aa'],
        mut_aa=row['mut_aa'],
        label=row['label'],
    )
    features_list_strict.append(feats)

df_features_strict = pd.DataFrame(features_list_strict)
|
|
|
|
# Persist both feature matrices.
df_features_full.to_parquet(PATHS['features'] / 'features_classical_full.parquet')
df_features_strict.to_parquet(PATHS['features'] / 'features_classical_mito_strict.parquet')

# Everything that is not identifying metadata counts as a model feature.
_META_COLS = {'mutation_idx', 'uniprot_acc', 'gene_symbol', 'position', 'wt_aa', 'mut_aa', 'label'}
feature_cols = [c for c in df_features_full.columns if c not in _META_COLS]