# Taxonaware-ESM2 / process_submission.py
# Uploaded with huggingface_hub by Smilesjs (commit 2d6fa7d, verified).
import sys
import argparse
import networkx as nx
import obonet
import csv
from tqdm import tqdm
import math
import os
# Raise the CSV field-size cap so unusually long rows don't raise an error.
# On Windows, sys.maxsize overflows the C long csv expects, so the 32-bit
# maximum (2**31 - 1) is hard-coded for portability.
csv.field_size_limit(2147483647)
def load_dag(obo_path):
    """Load a GO DAG from an OBO file and pre-compute lookup maps.

    Returns a tuple ``(graph, parents_map, ancestors_map)``:
      * graph         -- the obonet/networkx graph (edges point Child -> Parent).
      * parents_map   -- {term: [immediate parents]} for every node.
      * ancestors_map -- {term: set of all ancestors} for fast active-set
                         expansion.
    """
    print(f"Loading OBO file from {obo_path}...")
    dag = obonet.read_obo(obo_path)
    print(f"DAG loaded. Nodes: {len(dag)}, Edges: {len(dag.edges)}")

    # obonet stores 'is_a' edges Child -> Parent, so successors ARE the parents.
    direct_parents = {term: list(dag.successors(term)) for term in dag.nodes()}

    # Pre-computing the full ancestor set per node takes a few seconds for ~47k
    # nodes but makes per-protein expansion cheap later.
    print("Pre-computing ancestor maps...")
    all_ancestors = {term: nx.ancestors(dag, term) for term in dag.nodes()}
    return dag, direct_parents, all_ancestors
def process_single_protein(entries, parents_map, ancestors_map, topo_order, alpha, power):
    """Make one protein's GO-term scores hierarchically consistent.

    Args:
        entries: iterable of (go_term, score) pairs; duplicate terms keep the
            maximum score.
        parents_map: {term: [immediate parents]} over the whole ontology.
        ancestors_map: {term: set of all ancestors}.
        topo_order: {term: rank}, ranks increase from leaves toward roots
            (terms missing from the map sort first, i.e. are treated as leaves).
        alpha: strength of the parent->child damping in [0, 1]. 0 disables it;
            1 enforces child <= min(parents) strictly.
        power: exponent applied to every surviving score at the end.

    Returns:
        {term: final_score} for every term whose score stayed > 0.
    """
    # 1. Seed scores from the input entries; duplicates keep the max.
    scores = {}
    for term, score in entries:
        scores[term] = max(scores.get(term, 0.0), score)

    # 2. Add every ancestor at 0.0 so upward propagation can reach and
    #    boost it; without this, parents outside the input would be skipped.
    for term in list(scores):
        for anc in ancestors_map.get(term, ()):
            scores.setdefault(anc, 0.0)

    # 3. Order the active terms leaf -> root via the global topological rank.
    active_nodes = sorted(scores, key=lambda n: topo_order.get(n, 0))

    # 4. Positive propagation (child -> parent): each parent's score becomes
    #    at least the maximum of its children's scores. The ancestor
    #    expansion above guarantees every parent is already in `scores`.
    for node in active_nodes:
        node_score = scores[node]
        if node_score <= 0:
            continue
        for parent in parents_map.get(node, ()):
            if parent in scores:
                scores[parent] = max(scores[parent], node_score)

    # 5. Negative (soft) propagation (parent -> child). Strict consistency
    #    requires child <= every parent, so the MINIMUM parent score is the
    #    limiting factor:
    #        child *= min_parent * alpha + (1 - alpha)
    #    min_parent == 1 leaves the child unchanged; min_parent == 0 scales
    #    it by (1 - alpha). Roots have an implicit parent score of 1.0 and
    #    are therefore untouched (the empty-parents `continue` below).
    for node in reversed(active_nodes):
        parent_scores = [scores[p] for p in parents_map.get(node, ()) if p in scores]
        if not parent_scores:
            continue
        factor = min(parent_scores) * alpha + (1.0 - alpha)
        scores[node] *= factor

    # 6. Power scaling; terms that ended at exactly 0 are dropped.
    return {term: math.pow(val, power) for term, val in scores.items() if val > 0}
def main():
    """CLI entry point.

    Streams a tab-separated submission file (ProteinID, GO, Score), makes
    each protein's scores hierarchically consistent against the GO DAG, and
    writes the power-scaled result. Rows for the same protein must be
    contiguous in the input (the stream is grouped, not sorted).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file')
    parser.add_argument('output_file')
    parser.add_argument('--obo', required=True)
    parser.add_argument('--alpha', type=float, default=0.7)
    parser.add_argument('--power', type=float, default=2.0)
    args = parser.parse_args()

    # 1. Load the DAG and its lookup maps.
    graph, parents_map, ancestors_map = load_dag(args.obo)

    # 2. Topological rank, leaf -> root (edges point Child -> Parent).
    print("Computing topological sort rank...")
    try:
        topo_list = list(nx.topological_sort(graph))
    except nx.NetworkXUnfeasible:
        print("Warning: Cycle detected. Using imprecise sort.")
        topo_list = list(graph.nodes())
    topo_rank = {node: i for i, node in enumerate(topo_list)}

    # 3. Stream the submission, grouping contiguous rows per protein.
    # Rough line estimate so tqdm can show a percentage; cosmetic only.
    est_lines = 112000000
    print(f"Streaming {args.input_file} -> {args.output_file}...")
    with open(args.input_file, 'r') as fin, open(args.output_file, 'w', newline='') as fout:
        reader = csv.reader(fin, delimiter='\t')
        writer = csv.writer(fout, delimiter='\t')

        def _flush(protein, entries):
            # Post-process one protein's accumulated entries and write the
            # surviving terms; scores below 0.001 are dropped to keep the
            # output small.
            res_dict = process_single_protein(
                entries, parents_map, ancestors_map, topo_rank, args.alpha, args.power)
            for term, val in res_dict.items():
                if val > 0.001:
                    writer.writerow([protein, term, f"{val:.6f}"])

        current_protein = None
        current_entries = []
        count = 0
        pbar = tqdm(total=est_lines, unit="line")
        for row in reader:
            pbar.update(1)
            if not row:
                continue
            # Submission format: ProteinID, GO, Score.
            p_id, go_id, score_str = row[0], row[1], row[2]
            try:
                score = float(score_str)
            except ValueError:
                continue  # header or malformed line
            if p_id != current_protein:
                if current_protein is not None:
                    _flush(current_protein, current_entries)
                    count += 1
                current_protein = p_id
                current_entries = []
            current_entries.append((go_id, score))
        # Flush the final protein group.
        if current_protein is not None:
            _flush(current_protein, current_entries)
            count += 1
        pbar.close()
    print(f"Finished. Processed {count} unique proteins.")
# Script entry point when run directly (not imported).
if __name__ == "__main__":
    main()