# FireProtDB2 / src / 02_add_uniprot_sequences.py
# (commit ae74a72 — "final pipeline and updated subsets", author: drake463)
#!/usr/bin/env python3
"""
Add UniProt sequences to FireProtDB parquet/csv.
Outputs:
- Parquet file containing a newly appended sequence column
Usage:
python 02_add_uniprot_sequences.py \
--input ../data/fireprotdb_cleaned.parquet \
--output ../data/fireprotdb_with_sequences.parquet \
--cache ../data/uniprot_cache.tsv
Notes:
- This script generates a cache file, which can be used to restart failed or paused runs.
"""
from __future__ import annotations
import argparse
import os
import time
from typing import Dict, Optional
import pandas as pd
import requests
# REST endpoint template: fetching "{acc}.fasta" returns the canonical
# sequence for one UniProtKB accession as a single-record FASTA.
UNIPROT_FASTA = "https://rest.uniprot.org/uniprotkb/{acc}.fasta"
def read_cache(path: str) -> Dict[str, str]:
    """Load a previously written accession -> sequence cache.

    The cache is a TSV file with one ``accession<TAB>sequence`` pair per
    line (as written by :func:`append_cache`). Returns an empty dict when
    the file does not exist yet.

    Lines without a tab separator (e.g. a line truncated by an interrupted
    run) are skipped instead of raising, so a partially written cache does
    not abort a restart — which is the whole point of the cache.
    """
    if not os.path.exists(path):
        return {}
    cache: Dict[str, str] = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            # Skip blank and malformed (tab-less) lines.
            if not line or "\t" not in line:
                continue
            acc, seq = line.split("\t", 1)
            cache[acc] = seq
    return cache
def append_cache(path: str, acc: str, seq: str) -> None:
    """Append one ``accession<TAB>sequence`` pair to the TSV cache file.

    Creates the parent directory if needed. ``os.path.dirname`` returns
    ``""`` for a bare filename and ``os.makedirs("")`` raises
    FileNotFoundError, so the directory creation is guarded.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "a", encoding="utf-8") as f:
        f.write(f"{acc}\t{seq}\n")
def parse_fasta(text: str) -> Optional[str]:
    """Extract the residue string from a single-record FASTA text.

    Returns None when the text is empty, when the first non-blank line is
    not a ``>`` header, or when no residues follow the header.
    """
    rows = [row.strip() for row in text.splitlines()]
    rows = [row for row in rows if row]
    if not rows:
        return None
    if not rows[0].startswith(">"):
        return None
    sequence = "".join(rows[1:]).strip()
    return sequence if sequence else None
def fetch_uniprot_fasta(acc: str, session: requests.Session, timeout: int = 20) -> Optional[str]:
    """Download and parse the FASTA record for one UniProt accession.

    Returns the sequence string, or None on any non-200 response or an
    unparseable body. Network-level errors propagate to the caller.
    """
    response = session.get(UNIPROT_FASTA.format(acc=acc), timeout=timeout)
    if response.status_code == 200:
        return parse_fasta(response.text)
    return None
def main() -> None:
    """Fetch UniProt sequences for every accession in the input parquet,
    append them as a ``sequence`` column, and write the augmented parquet.

    Each successfully fetched sequence is appended to the TSV cache
    immediately, so an interrupted run can resume without re-downloading.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", default="../data/fireprotdb_cleaned.parquet")
    ap.add_argument("--output", default="../data/fireprotdb_with_sequences.parquet")
    ap.add_argument("--cache", default="../data/cache/uniprot_cache.tsv")
    ap.add_argument("--sleep", type=float, default=0.05, help="politeness delay between requests")
    ap.add_argument("--limit", type=int, default=0, help="debug: only fetch first N missing")
    args = ap.parse_args()

    df = pd.read_parquet(args.input)
    if "uniprotkb" not in df.columns:
        raise ValueError("Expected column 'uniprotkb' in input parquet")

    # Normalize accessions once (string dtype, no NA, no surrounding
    # whitespace) and reuse the same Series for the mapping step below.
    accs = df["uniprotkb"].astype("string").fillna("").str.strip()
    unique_accs = sorted({a for a in accs.unique().tolist() if a})

    cache = read_cache(args.cache)
    missing = [a for a in unique_accs if a not in cache]
    if args.limit and args.limit > 0:
        missing = missing[: args.limit]

    print(f"Unique accessions: {len(unique_accs):,}")
    print(f"Cached: {len(cache):,}")
    print(f"Missing to fetch: {len(missing):,}")

    session = requests.Session()
    fetched = 0
    for i, acc in enumerate(missing, 1):
        seq = fetch_uniprot_fasta(acc, session=session)
        if seq:
            cache[acc] = seq
            # Persist each hit immediately so interrupted runs can resume.
            append_cache(args.cache, acc, seq)
            fetched += 1
        # Brief delay between requests (avoid hammering the API).
        time.sleep(args.sleep)
        if i % 250 == 0:
            print(f"Fetched {fetched:,}/{i:,} missing...")

    # Map accessions to sequences; rows whose fetch failed get None.
    df["sequence"] = accs.map(lambda a: cache.get(a))

    # Compute lengths as plain numeric (avoid pd.NA boolean ambiguity).
    df["sequence_len_uniprot"] = df["sequence"].map(lambda s: len(s) if isinstance(s, str) else None)

    # Fall back to an index-aligned all-NA Series when 'sequence_length'
    # is absent: df.get(..., pd.NA) would yield a scalar, and the scalar
    # has no .notna(), so the length_match expression below would crash.
    if "sequence_length" in df.columns:
        fp_raw = df["sequence_length"]
    else:
        fp_raw = pd.Series(pd.NA, index=df.index)
    fp_len = pd.to_numeric(fp_raw, errors="coerce")
    up_len = pd.to_numeric(df["sequence_len_uniprot"], errors="coerce")
    df["sequence_length_num"] = fp_len
    # True only when both lengths are present AND equal.
    df["length_match"] = (up_len == fp_len) & up_len.notna() & fp_len.notna()

    match_rate = float(df["length_match"].fillna(False).mean()) if len(df) else 0.0
    print(f"Rows with sequence: {df['sequence'].notna().sum():,}/{len(df):,}")
    print(f"Length match rate (rows): {match_rate:.3f}")

    df.to_parquet(args.output, index=False)
    print(f"Wrote: {args.output}")
# Run the pipeline only when executed as a script (safe to import).
if __name__ == "__main__":
    main()