"""Untitled17.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""
|
|
import gzip
import json
import pickle
import time
from io import BytesIO, StringIO
from pathlib import Path
from typing import List, Optional

import pandas as pd
import requests
from tqdm.auto import tqdm
|
|
class DisProtDownloader:
    """Download disorder annotations from the DisProt REST API.

    One parquet file (``disprot_data.parquet``) is written to *save_dir*
    with one row per (protein, disorder region) pair.
    """

    BASE_URL = "https://disprot.org/api"

    def __init__(self, save_dir: Path):
        """Store *save_dir* and create it (including parents) if missing."""
        self.save_dir = save_dir
        self.save_dir.mkdir(parents=True, exist_ok=True)

    def download_all(self) -> pd.DataFrame:
        """Fetch every current DisProt entry and return a region-level DataFrame.

        Returns:
            A DataFrame with one row per annotated region, also saved to
            ``<save_dir>/disprot_data.parquet``. On any failure, an empty
            DataFrame is returned (best-effort: the error is printed, not raised).
        """
        print("\n📥 Téléchargement DisProt...")

        url = f"{self.BASE_URL}/search?release=current&show_ambiguous=false&format=json"

        try:
            response = requests.get(url, timeout=120)
            response.raise_for_status()
            payload = response.json()

            entries = payload.get('data', [])
            print(f" ✓ {len(entries)} entrées téléchargées")

            rows = []
            for item in tqdm(entries, desc=" Parsing"):
                # Protein-level fields, repeated on every region row.
                base = {
                    'disprot_id': item.get('disprot_id', ''),
                    'uniprot_acc': item.get('acc', ''),
                    'name': item.get('name', ''),
                    'organism': item.get('organism', ''),
                    'sequence': item.get('sequence', ''),
                }
                for reg in item.get('regions', []):
                    row = dict(base)
                    row['region_start'] = reg.get('start', 0)
                    row['region_end'] = reg.get('end', 0)
                    row['region_type'] = reg.get('type', '')
                    row['term_name'] = reg.get('term_name', '')
                    row['evidence'] = reg.get('evidence', '')
                    rows.append(row)

            df = pd.DataFrame(rows)

            save_path = self.save_dir / 'disprot_data.parquet'
            df.to_parquet(save_path)
            print(f" ✓ Sauvegardé: {save_path}")

            return df

        except Exception as e:
            print(f" ✗ Erreur DisProt: {e}")
            return pd.DataFrame()
|
|
|
|
|
|
|
|
class UniProtDownloader:
    """Download protein entries from the UniProtKB REST API.

    Results are flattened to one row per protein and written to
    ``uniprot_mitochondrial.parquet`` in *save_dir*.
    """

    BASE_URL = "https://rest.uniprot.org/uniprotkb"

    def __init__(self, save_dir: Path):
        """Store *save_dir* and create it (including parents) if missing."""
        self.save_dir = save_dir
        self.save_dir.mkdir(parents=True, exist_ok=True)

    @staticmethod
    def _next_page(links: dict) -> Optional[str]:
        """Return the next-page URL from a parsed HTTP ``Link`` header, or None.

        UniProt paginates via the ``Link`` response header (exposed by
        requests as ``response.links``), NOT via the JSON body — the old
        ``data['link']['next']`` lookup always returned None, so only the
        first page was ever fetched.
        """
        return links.get('next', {}).get('url')

    @staticmethod
    def _parse_entry(entry: dict) -> dict:
        """Flatten one UniProtKB JSON entry into a flat record dict.

        Extracts accession, names, sequence, subcellular locations and any
        region/compositional-bias features whose description mentions
        disorder or low complexity.
        """
        acc = entry.get('primaryAccession', '')

        seq_data = entry.get('sequence', {})
        sequence = seq_data.get('value', '')
        length = seq_data.get('length', 0)

        protein_name = ''
        if 'proteinDescription' in entry:
            rec_name = entry['proteinDescription'].get('recommendedName', {})
            protein_name = rec_name.get('fullName', {}).get('value', '')

        genes = entry.get('genes', [])
        gene_name = genes[0].get('geneName', {}).get('value', '') if genes else ''

        # Collect all annotated subcellular locations, joined with '|'.
        locations = []
        for comment in entry.get('comments', []):
            if comment.get('commentType') == 'SUBCELLULAR LOCATION':
                for loc in comment.get('subcellularLocations', []):
                    loc_val = loc.get('location', {}).get('value', '')
                    if loc_val:
                        locations.append(loc_val)

        # (start, end) pairs of features that look like disordered regions.
        disorder_regions = []
        for feat in entry.get('features', []):
            if feat.get('type') in ('Region', 'Compositional bias'):
                desc = feat.get('description', '').lower()
                if 'disordered' in desc or 'low complexity' in desc:
                    loc = feat.get('location', {})
                    start = loc.get('start', {}).get('value', 0)
                    end = loc.get('end', {}).get('value', 0)
                    disorder_regions.append((start, end))

        return {
            'uniprot_acc': acc,
            'protein_name': protein_name,
            'gene_name': gene_name,
            'sequence': sequence,
            'length': length,
            'subcellular_locations': '|'.join(locations),
            'disorder_regions': str(disorder_regions),
            'is_mitochondrial': True,
        }

    def download_mitochondrial_human(self, max_results: int = 5000) -> pd.DataFrame:
        """Download human mitochondrial proteins (organism 9606, SL-0173).

        Args:
            max_results: hard cap on the number of proteins kept.

        Returns:
            One-row-per-protein DataFrame, also saved to
            ``<save_dir>/uniprot_mitochondrial.parquet``. Empty DataFrame
            on failure (best-effort: error printed, not raised).
        """
        query = "(organism_id:9606) AND (cc_scl_term:SL-0173)"

        url = f"{self.BASE_URL}/search"
        params = {
            'query': query,
            'format': 'json',
            'size': min(500, max_results),
            'fields': 'accession,id,protein_name,gene_names,sequence,length,cc_subcellular_location,ft_domain,ft_region,organism_name'
        }

        all_results = []

        try:
            response = requests.get(url, params=params, timeout=120)
            response.raise_for_status()
            data = response.json()

            results = data.get('results', [])
            all_results.extend(results)
            print(f" ✓ {len(results)} ")

            # BUG FIX: follow the cursor from the HTTP Link header.
            next_link = self._next_page(response.links)
            while next_link and len(all_results) < max_results:
                time.sleep(0.5)  # be polite to the API
                response = requests.get(next_link, timeout=120)
                response.raise_for_status()
                data = response.json()
                all_results.extend(data.get('results', []))
                next_link = self._next_page(response.links)
                print(f" ... {len(all_results)} protéines")

            records = [
                self._parse_entry(entry)
                for entry in tqdm(all_results[:max_results], desc=" Parsing")
            ]

            df = pd.DataFrame(records)

            save_path = self.save_dir / 'uniprot_mitochondrial.parquet'
            df.to_parquet(save_path)
            print(f" ✓ : {save_path}")

            return df

        except Exception as e:
            print(f" ✗ : {e}")
            return pd.DataFrame()
|
|
|
|
class ClinVarDownloader:
    """Download pathogenic/benign SNV summaries from ClinVar via NCBI E-utilities."""

    def __init__(self, save_dir: Path):
        """Store *save_dir* and create it (including parents) if missing."""
        self.save_dir = save_dir
        self.save_dir.mkdir(parents=True, exist_ok=True)

    @staticmethod
    def _protein_change_from_title(title: str) -> str:
        """Extract the protein change (e.g. ``Arg248Gln``) from a variant title.

        Titles look like ``NM_000546.6(TP53):c.743G>A (p.Arg248Gln)``.
        Returns '' when no ``(p.`` marker is present.

        BUG FIX: the old inline code did ``title.find(')', start)`` and used
        the result unchecked — a malformed title with no closing ')' gave
        ``end == -1`` and silently truncated the last character via
        ``title[start:-1]``. We now fall back to the rest of the string.
        """
        marker = title.find('(p.')
        if marker == -1:
            return ''
        start = marker + 3
        end = title.find(')', start)
        if end == -1:
            return title[start:]
        return title[start:end]

    def download_variants_for_genes(self, gene_list: List[str], max_per_gene: int = 100) -> pd.DataFrame:
        """Fetch up to *max_per_gene* pathogenic/benign SNVs for each gene.

        Only the first 50 genes of *gene_list* are queried. Per-gene failures
        are printed and skipped (best-effort). The result is saved to
        ``<save_dir>/clinvar_variants.parquet`` when non-empty.
        """
        base_url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils"

        all_variants = []

        for gene in tqdm(gene_list[:50], desc=" Gènes"):
            try:
                # Step 1: esearch for matching variant UIDs.
                search_url = f"{base_url}/esearch.fcgi"
                search_params = {
                    'db': 'clinvar',
                    'term': f'{gene}[gene] AND ("pathogenic"[clinsig] OR "benign"[clinsig]) AND "single nucleotide variant"[vartype]',
                    'retmax': max_per_gene,
                    'retmode': 'json'
                }

                response = requests.get(search_url, params=search_params, timeout=30)
                response.raise_for_status()
                search_data = response.json()

                id_list = search_data.get('esearchresult', {}).get('idlist', [])

                if not id_list:
                    continue

                # NCBI rate limit: max ~3 requests/second without an API key.
                time.sleep(0.34)

                # Step 2: esummary for the found UIDs.
                fetch_url = f"{base_url}/esummary.fcgi"
                fetch_params = {
                    'db': 'clinvar',
                    'id': ','.join(id_list[:max_per_gene]),
                    'retmode': 'json'
                }

                response = requests.get(fetch_url, params=fetch_params, timeout=30)
                response.raise_for_status()
                fetch_data = response.json()

                results = fetch_data.get('result', {})

                for uid in id_list[:max_per_gene]:
                    # 'uids' is a bookkeeping key in the esummary result dict.
                    if uid not in results or uid == 'uids':
                        continue

                    variant = results[uid]

                    title = variant.get('title', '')
                    clinical_sig = variant.get('clinical_significance', {}).get('description', '')

                    all_variants.append({
                        'clinvar_id': uid,
                        'gene': gene,
                        'title': title,
                        'protein_change': self._protein_change_from_title(title),
                        'clinical_significance': clinical_sig,
                        'is_pathogenic': 'pathogenic' in clinical_sig.lower(),
                        'is_benign': 'benign' in clinical_sig.lower()
                    })

                time.sleep(0.34)

            except Exception as e:
                print(f" Error {gene}: {e}")
                continue

        df = pd.DataFrame(all_variants)

        if len(df) > 0:
            save_path = self.save_dir / 'clinvar_variants.parquet'
            df.to_parquet(save_path)
            print(f" ✓ {len(df)} {save_path}")
        else:
            print("None")

        return df
|
|
class MobiDBDownloader:
    """Download per-protein disorder consensus data from the MobiDB API."""

    BASE_URL = "https://mobidb.org/api/download"

    def __init__(self, save_dir: Path):
        """Store *save_dir* and create it (including parents) if missing."""
        self.save_dir = save_dir
        self.save_dir.mkdir(parents=True, exist_ok=True)

    def download_for_proteins(self, uniprot_accs: List[str]) -> pd.DataFrame:
        """Fetch MobiDB consensus data for up to 200 UniProt accessions.

        Non-200 responses and per-protein errors are silently skipped
        (best-effort). When any rows were collected, the DataFrame is also
        saved to ``<save_dir>/mobidb_data.parquet``.
        """
        records = []

        for accession in tqdm(uniprot_accs[:200], desc=" Protéines"):
            try:
                endpoint = f"https://mobidb.org/api/download?acc={accession}&format=json"
                resp = requests.get(endpoint, timeout=30)

                if resp.status_code != 200:
                    continue

                payload = resp.json()
                consensus = payload.get('consensus', {})

                # NOTE(review): assumes the MobiDB payload exposes
                # 'consensus.disorder.regions' and 'consensus.plddt.regions'
                # — confirm against the current API schema.
                records.append({
                    'uniprot_acc': accession,
                    'disorder_content': payload.get('disorder_content', 0),
                    'disorder_regions': str(consensus.get('disorder', {}).get('regions', [])),
                    'plddt_low_regions': str(consensus.get('plddt', {}).get('regions', [])),
                    'sequence_length': payload.get('length', 0),
                })

                time.sleep(0.1)  # light throttling between requests

            except Exception:
                continue

        df = pd.DataFrame(records)

        if len(df) > 0:
            save_path = self.save_dir / 'mobidb_data.parquet'
            df.to_parquet(save_path)
            print(f" ✓ {len(df)} entrées sauvegardées: {save_path}")

        return df
|
|
|
|
# Run the full download pipeline.
# NOTE(review): PATHS is not defined in this view — presumably a mapping of
# dataset name -> pathlib.Path created in an earlier notebook cell; confirm.

disprot_downloader = DisProtDownloader(PATHS['disprot'])
df_disprot = disprot_downloader.download_all()

uniprot_downloader = UniProtDownloader(PATHS['uniprot'])
df_uniprot = uniprot_downloader.download_mitochondrial_human(max_results=2000)

if len(df_uniprot) > 0:
    # ClinVar is queried by gene symbol, so drop missing/empty names first.
    mito_genes = [g for g in df_uniprot['gene_name'].dropna().unique().tolist() if g]

    clinvar_downloader = ClinVarDownloader(PATHS['clinvar'])
    df_clinvar = clinvar_downloader.download_variants_for_genes(mito_genes[:100])
else:
    df_clinvar = pd.DataFrame()

if len(df_uniprot) > 0:
    # MobiDB is queried by UniProt accession.
    mito_accs = df_uniprot['uniprot_acc'].tolist()

    mobidb_downloader = MobiDBDownloader(PATHS['mobidb'])
    df_mobidb = mobidb_downloader.download_for_proteins(mito_accs[:200])
else:
    df_mobidb = pd.DataFrame()