import json
import os
import re
import sys
from typing import Dict, List, Optional, Set

from Bio import SeqIO
from ete3 import NCBITaxa
from tqdm import tqdm
|
|
|
|
|
|
|
# Taxonomic ranks from broadest to most specific. The order matters: it is
# the order in which lineages are resolved and per-rank vocab files emitted.
TARGET_RANKS = [
    "phylum", "class", "order", "family", "genus", "species", "subspecies",
]
|
|
|
class TaxonomyBuilder:
    """Collects taxonomy lineage vocabularies (one per rank in TARGET_RANKS)
    from FASTA headers and TSV TaxID lists, using ete3's NCBITaxa database."""

    # Compiled once at class-definition time; matches the UniProt-style
    # "OX=<taxid>" field in FASTA headers.
    _OX_RE = re.compile(r"OX=(\d+)")

    def __init__(self):
        """
        Initialize NCBITaxa.
        Note: The first run will download/update the taxonomy database (~300MB).
        """
        self.ncbi = NCBITaxa()
        # Per-rank sets of taxon names seen so far, keyed by TARGET_RANKS entries.
        self.vocab_sets: Dict[str, Set[str]] = {rank: set() for rank in TARGET_RANKS}
        # All distinct NCBI TaxIDs observed across every input file.
        self.observed_taxids: Set[int] = set()

    def extract_taxid_from_header(self, header: str) -> Optional[int]:
        """
        Extract the NCBI Taxonomy ID from a UniProt-style FASTA header.

        Format example: "... OX=9606 ..." -> returns 9606.
        Returns None when no OX= field is present.
        """
        match = self._OX_RE.search(header)
        # The captured group is all digits, so int() cannot fail here.
        return int(match.group(1)) if match else None

    def get_lineage_names(self, tax_id: int) -> Dict[str, str]:
        """
        Retrieve the lineage of tax_id and map it onto TARGET_RANKS.

        Side effect: every resolved name is added to self.vocab_sets.
        Ranks absent from the lineage are reported as "<UNK>"; an unknown
        or invalid tax_id yields an all-"<UNK>" mapping.
        """
        try:
            # Only these lookups can raise; keep the try body minimal.
            lineage_ids = self.ncbi.get_lineage(tax_id)
            ranks = self.ncbi.get_rank(lineage_ids)
            names = self.ncbi.get_taxid_translator(lineage_ids)
        except ValueError:
            # NCBITaxa raises ValueError for TaxIDs missing from its database.
            return {rank: "<UNK>" for rank in TARGET_RANKS}

        rank_to_name = {}
        for tid in lineage_ids:
            rank = ranks.get(tid)
            # NCBI labels many below-species entries as "strain"; fold them
            # into the subspecies slot so they land in a target rank.
            if rank == "strain":
                rank = "subspecies"
            if rank in TARGET_RANKS:
                rank_to_name[rank] = names[tid]

        result = {}
        for rank in TARGET_RANKS:
            if rank in rank_to_name:
                name = rank_to_name[rank]
                self.vocab_sets[rank].add(name)
                result[rank] = name
            else:
                result[rank] = "<UNK>"
        return result

    def build_from_fasta(self, fasta_paths: List[str]):
        """
        Iterate over FASTA files, collecting TaxIDs and vocab terms from
        every record whose header carries an OX= field.
        """
        print(f"Processing FASTA files: {fasta_paths}")

        count = 0
        for path in fasta_paths:
            for record in tqdm(SeqIO.parse(path, "fasta"), desc=f"Reading {os.path.basename(path)}"):
                tax_id = self.extract_taxid_from_header(record.description)
                if tax_id:
                    self.observed_taxids.add(tax_id)
                    # Called for its side effect: populates self.vocab_sets.
                    self.get_lineage_names(tax_id)
                    count += 1

        print(f"Processed {count} sequences from FASTA.")

    def build_from_tsv(self, tsv_paths: List[str]):
        """
        Iterate over TSV files to collect unique TaxIDs and update vocab.
        Expected format: TaxID \t SpeciesName (Header skipped if present)
        """
        if not tsv_paths:
            return

        print(f"Processing TSV files: {tsv_paths}")
        count = 0
        for path in tsv_paths:
            with open(path, 'r') as f:
                first_line = f.readline()
                # Header sniffing: if the first field parses as an int there is
                # no header row, so rewind and process it as data.
                try:
                    int(first_line.split('\t')[0])
                    f.seek(0)
                except ValueError:
                    pass

                for line in f:
                    parts = line.strip().split('\t')
                    if parts:
                        try:
                            tax_id = int(parts[0])
                            self.observed_taxids.add(tax_id)
                            self.get_lineage_names(tax_id)
                            count += 1
                        except ValueError:
                            # Non-numeric first column: skip the row silently.
                            pass
        print(f"Processed {count} additional IDs from TSV.")

    def save_vocab(self, output_dir: str):
        """
        Save the observed TaxIDs and one vocabulary JSON per rank.
        Index 0 is reserved for "<UNK>" in every vocab.
        """
        os.makedirs(output_dir, exist_ok=True)

        observed_path = os.path.join(output_dir, "observed_taxids.json")
        with open(observed_path, "w") as f:
            json.dump(list(self.observed_taxids), f)
        print(f"Saved {len(self.observed_taxids)} observed TaxIDs to {observed_path}")

        for rank in TARGET_RANKS:
            # Sort for deterministic, reproducible index assignment.
            unique_terms = sorted(self.vocab_sets[rank])

            vocab_map = {"<UNK>": 0}
            for idx, term in enumerate(unique_terms, 1):
                vocab_map[term] = idx

            filename = os.path.join(output_dir, f"{rank}_vocab.json")
            with open(filename, "w") as f:
                json.dump(vocab_map, f, indent=2)

            # Bug fix: previously printed the literal "(unknown)" instead of
            # the actual output path.
            print(f"Saved {rank} vocab with {len(vocab_map)} terms to {filename}")
|
|
|
|
|
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Build taxonomy vocabulary from FASTA files.")
    parser.add_argument("--input_fasta", nargs='+', required=True, help="Path to input FASTA file(s)")
    parser.add_argument("--input_tsv", nargs='+', help="Path to input TSV file(s) with TaxIDs")
    parser.add_argument("--output_dir", required=True, help="Directory to save vocabulary JSON files")

    args = parser.parse_args()

    # Validate every input path up front (FASTA and optional TSV alike);
    # exit with status 1 on the first missing file, as before.
    for path in args.input_fasta + (args.input_tsv or []):
        if not os.path.exists(path):
            print(f"Error: Input file at {path} not found.")
            sys.exit(1)

    builder = TaxonomyBuilder()
    builder.build_from_fasta(args.input_fasta)
    if args.input_tsv:
        builder.build_from_tsv(args.input_tsv)

    builder.save_vocab(args.output_dir)
|