File size: 4,154 Bytes
8e574de
 
ae74a72
8e574de
ae74a72
 
8e574de
ae74a72
 
 
 
 
 
 
 
8e574de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
#!/usr/bin/env python3
"""
Add UniProt sequences to the FireProtDB parquet table.

Outputs:
- Parquet file containing a newly appended sequence column

Usage:
    python 02_add_uniprot_sequences.py \
        --input ../data/fireprotdb_cleaned.parquet \
        --output ../data/fireprotdb_with_sequences.parquet \
        --cache ../data/cache/uniprot_cache.tsv

Notes:
- This script generates a cache file, which can be used to restart failed or paused runs.
"""

from __future__ import annotations

import argparse
import os
import time
from typing import Dict, Optional

import pandas as pd
import requests

UNIPROT_FASTA = "https://rest.uniprot.org/uniprotkb/{acc}.fasta"

def read_cache(path: str) -> Dict[str, str]:
    """Load the accession -> sequence cache from a TSV file.

    Each cache line is ``<accession>\\t<sequence>``. Returns an empty dict
    when the cache file does not exist yet (first run).

    Lines without a tab separator (e.g. a partially written last line from
    an interrupted run) are skipped instead of raising, so a damaged cache
    never prevents a restart — the affected accession is simply re-fetched.
    """
    if not os.path.exists(path):
        return {}
    cache: Dict[str, str] = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            # Skip blank lines and malformed (tab-less) lines.
            if not line or "\t" not in line:
                continue
            acc, seq = line.split("\t", 1)
            cache[acc] = seq
    return cache

def append_cache(path: str, acc: str, seq: str) -> None:
    """Append one accession/sequence pair to the TSV cache file.

    Creates the parent directory on first use. Guards against cache paths
    with no directory component: ``os.path.dirname`` returns ``""`` for a
    bare filename, and ``os.makedirs("")`` raises FileNotFoundError.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "a", encoding="utf-8") as f:
        f.write(f"{acc}\t{seq}\n")

def parse_fasta(text: str) -> Optional[str]:
    """Extract the residue string from a single-record FASTA text.

    Returns None when the text is empty, the first non-blank line is not a
    ``>`` header, or no residues follow the header.
    """
    stripped = [segment.strip() for segment in text.splitlines()]
    records = [segment for segment in stripped if segment]
    if not records:
        return None
    header, *body = records
    if not header.startswith(">"):
        return None
    sequence = "".join(body).strip()
    return sequence if sequence else None

def fetch_uniprot_fasta(acc: str, session: requests.Session, timeout: int = 20) -> Optional[str]:
    """Download and parse the FASTA record for one UniProt accession.

    Returns the raw sequence string, or None when the request does not
    return HTTP 200 or the response body is not a parsable FASTA record.
    """
    response = session.get(UNIPROT_FASTA.format(acc=acc), timeout=timeout)
    if response.status_code == 200:
        return parse_fasta(response.text)
    return None

def main() -> None:
    """CLI entry point: add a UniProt `sequence` column to the input parquet.

    Flow: read input parquet -> load on-disk TSV cache -> fetch sequences
    for accessions missing from the cache (each successful fetch is
    appended to the cache immediately, so an interrupted run can resume)
    -> map sequences onto rows -> write the output parquet.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", default="../data/fireprotdb_cleaned.parquet")
    ap.add_argument("--output", default="../data/fireprotdb_with_sequences.parquet")
    ap.add_argument("--cache", default="../data/cache/uniprot_cache.tsv")
    ap.add_argument("--sleep", type=float, default=0.05, help="politeness delay between requests")
    ap.add_argument("--limit", type=int, default=0, help="debug: only fetch first N missing")
    args = ap.parse_args()

    df = pd.read_parquet(args.input)

    # Normalize accession column
    if "uniprotkb" not in df.columns:
        raise ValueError("Expected column 'uniprotkb' in input parquet")

    # Blank out missing accessions and strip whitespace before the values
    # are used as URL path segments; empty strings are filtered below.
    accs = df["uniprotkb"].astype("string").fillna("").str.strip()
    unique_accs = sorted({a for a in accs.unique().tolist() if a})

    cache = read_cache(args.cache)

    # Only hit the API for accessions not already cached on disk.
    missing = [a for a in unique_accs if a not in cache]
    if args.limit and args.limit > 0:
        missing = missing[: args.limit]

    print(f"Unique accessions: {len(unique_accs):,}")
    print(f"Cached: {len(cache):,}")
    print(f"Missing to fetch: {len(missing):,}")

    # One Session reuses the HTTP connection across all requests.
    session = requests.Session()
    fetched = 0

    for i, acc in enumerate(missing, 1):
        seq = fetch_uniprot_fasta(acc, session=session)
        if seq:
            cache[acc] = seq
            # Persist immediately so a crash/interrupt loses at most the
            # in-flight request, not the whole batch.
            append_cache(args.cache, acc, seq)
            fetched += 1
        # brief delay (avoid hammering API)
        time.sleep(args.sleep)

        if i % 250 == 0:
            print(f"Fetched {fetched:,}/{i:,} missing...")

    # Add sequences: rows whose accession was never fetched get None.
    df["sequence"] = df["uniprotkb"].astype("string").fillna("").str.strip().map(lambda a: cache.get(a, None))

    # Compute lengths as plain numeric (avoid pd.NA boolean ambiguity)
    df["sequence_len_uniprot"] = df["sequence"].map(lambda s: len(s) if isinstance(s, str) else None)

    # `sequence_length` may be absent from the input; .get() then yields a
    # scalar NA and every row's length_match becomes False.
    fp_len = pd.to_numeric(df.get("sequence_length", pd.NA), errors="coerce")
    up_len = pd.to_numeric(df["sequence_len_uniprot"], errors="coerce")

    df["sequence_length_num"] = fp_len
    # notna() guards make the comparison False (not NA) when either side
    # is missing, keeping the column plain boolean.
    df["length_match"] = (up_len == fp_len) & up_len.notna() & fp_len.notna()

    match_rate = float(df["length_match"].fillna(False).mean()) if len(df) else 0.0

    print(f"Rows with sequence: {df['sequence'].notna().sum():,}/{len(df):,}")
    print(f"Length match rate (rows): {match_rate:.3f}")

    df.to_parquet(args.output, index=False)
    print(f"Wrote: {args.output}")

# Script entry point; no side effects on import.
if __name__ == "__main__":
    main()