Upload folder using huggingface_hub

Files changed:
- .gitattributes +2 -0
- go-basic.obo +3 -0
- process_submission.py +237 -0
- ta_esm2_submission.tsv +3 -0
.gitattributes
CHANGED

@@ -68,3 +68,5 @@ outputs/gpu_preds_homolog_epoch_10.tsv filter=lfs diff=lfs merge=lfs -text
 outputs/gpu_preds_homolog_epoch_3.tsv filter=lfs diff=lfs merge=lfs -text
 outputs/gpu_preds_novel_epoch_10.tsv filter=lfs diff=lfs merge=lfs -text
 outputs/gpu_preds_novel_epoch_3.tsv filter=lfs diff=lfs merge=lfs -text
+go-basic.obo filter=lfs diff=lfs merge=lfs -text
+ta_esm2_submission.tsv filter=lfs diff=lfs merge=lfs -text
go-basic.obo
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca48860e6fbcf0ab65a51a9a275b6d184c3a36dd80cd02b1f193e03fcfd4d00b
+size 31350074
process_submission.py
ADDED

@@ -0,0 +1,237 @@
+
+import sys
+import argparse
+import networkx as nx
+import obonet
+import csv
+from tqdm import tqdm
+import math
+import os
+
+# Increase CSV field size limit (Windows compatibility: use 2**31-1)
+csv.field_size_limit(2147483647)
+
+def load_dag(obo_path):
+    print(f"Loading OBO file from {obo_path}...")
+    graph = obonet.read_obo(obo_path)
+    print(f"DAG loaded. Nodes: {len(graph)}, Edges: {len(graph.edges)}")
+
+    # 1. Parents map (Child -> Parents).
+    # In obonet, an edge runs Child -> Parent ('is_a').
+    parents_map = {n: list(graph.successors(n)) for n in graph.nodes()}
+
+    # 2. Ancestors map (Child -> All Ancestors) for fast active-set expansion.
+    # This takes a few seconds for ~47k nodes.
+    print("Pre-computing ancestor maps...")
+    ancestors_map = {}
+    for node in graph.nodes():
+        ancestors_map[node] = nx.ancestors(graph, node)
+
+    return graph, parents_map, ancestors_map
+
+def process_single_protein(entries, parents_map, ancestors_map, topo_order, alpha, power):
+    # 1. Init scores.
+    # We must track all 'active' nodes (input terms plus their ancestors).
+    # Use a dictionary {GO term: score}; missing terms default to 0.0.
+
+    scores = {}
+    for term, score in entries:
+        scores[term] = max(scores.get(term, 0.0), score)  # Handle duplicate lines if any
+
+    # Expand to ancestors.
+    # This is only strictly necessary if positive propagation needs them explicitly,
+    # but it guarantees that parents exist in the dict before they are boosted.
+    # Collect all involved nodes.
+
+    active_keys = list(scores.keys())
+    for term in active_keys:
+        # Add all ancestors with 0.0 if not already present.
+        # This matters so positive propagation can boost them from 0 to the child score.
+        if term in ancestors_map:
+            for anc in ancestors_map[term]:
+                if anc not in scores:
+                    scores[anc] = 0.0
+
+    # Now we have a sparse dict 'scores' containing ~50-100 terms.
+    # We want to propagate on this subgraph.
+
+    # Create a sorted list of active nodes based on the global topological order.
+    # The global order runs [Child, ..., Parent].
+    # Filter the global list down to the current keys.
+    # Optimization: sorting keys by their index in the global list would be O(N)
+    # per protein (N = ~47k nodes), which is too slow.
+    # Faster: assign a rank to each node once globally,
+    # then sort the active nodes by that rank.
+    # 'topo_order' passed in is therefore a dict {Node: Rank}.
+
+    # Sort active nodes by rank.
+    # A node missing from topo_order (e.g. an obsolete term) could be ignored
+    # or pushed to the end; pushing it to the end would treat it like a root
+    # or a disconnected node. Its exact position does not matter here,
+    # so a simple sort with a default rank is enough:
+    active_nodes = sorted(scores.keys(), key=lambda n: topo_order.get(n, 0))  # rank 0 = leaf end
+    # nx.topological_sort yields [Leaf, ..., Root] for Child -> Parent edges,
+    # so index 0 is a leaf and index N is a root.
+
+    # ---------------------------------------------------------
+    # Step 1: Positive Propagation (Child -> Parent)
+    # Iterate from Child to Parent (index 0 -> N).
+    # For each node, propagate its score to its immediate parents.
+    # ---------------------------------------------------------
+    for node in active_nodes:
+        my_score = scores[node]
+        if my_score <= 0: continue
+
+        if node in parents_map:
+            for parent in parents_map[node]:
+                # Update the parent to the max of itself and the child.
+                # Only update if the parent is tracked (it should be, due to ancestor expansion).
+                if parent in scores:
+                    scores[parent] = max(scores[parent], my_score)
+                else:
+                    # Should not happen if ancestors_map was correct.
+                    pass
+
+    # ---------------------------------------------------------
+    # Step 2: Negative Propagation (Parent -> Child)
+    # Iterate from Parent to Child (index N -> 0).
+    # Logic: Child = Child * (Parent * Alpha + (1 - Alpha))
+    # Note: a node may have multiple parents, so which score controls?
+    # 'Consistent' usually means the child is <= every parent,
+    # so the soft formula needs a single definition of 'Parent Score':
+    # max(parents)? min(parents)? The average?
+    # Standard consistency requires <= all parents, so we use the MIN of all parents.
+    # For a root node the implicit Parent Score is 1.0.
+    # ---------------------------------------------------------
+
+    # Iterate backwards
+    for node in reversed(active_nodes):
+        if node not in parents_map or not parents_map[node]:
+            # Root node (or no recorded parents).
+            # If it is a root, the implicit parent score is 1.0,
+            # so no change is needed.
+            continue
+
+        # Get parent scores
+        parent_scores = []
+        for p in parents_map[node]:
+            if p in scores:
+                parent_scores.append(scores[p])
+            else:
+                # Parent not in the active set.
+                # This should not happen, since ancestors were expanded above;
+                # if expansion worked, p is already in scores.
+                # If p is still missing it is probably not a GO term,
+                # so it is skipped rather than assumed to be 0.0 or 1.0.
+                pass
+
+        if not parent_scores:
+            continue
+
+        # Use the MIN parent score as the limiting factor.
+        min_parent_score = min(parent_scores)
+
+        # Formula: Child = Child * (P * alpha + (1 - alpha))
+        # If P=1, Factor=1. If P=0, Factor=1-alpha.
+        factor = min_parent_score * alpha + (1.0 - alpha)
+
+        scores[node] = scores[node] * factor
+
+    # ---------------------------------------------------------
+    # Step 3: Power Scaling
+    # ---------------------------------------------------------
+    results = {}
+    for term, osc in scores.items():
+        if osc > 0:
+            # Apply power scaling.
+            final = math.pow(osc, power)
+            results[term] = final
+
+    return results
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('input_file')
+    parser.add_argument('output_file')
+    parser.add_argument('--obo', required=True)
+    parser.add_argument('--alpha', type=float, default=0.7)
+    parser.add_argument('--power', type=float, default=2.0)
+    args = parser.parse_args()
+
+    # 1. Load DAG
+    graph, parents_map, ancestors_map = load_dag(args.obo)
+
+    # 2. Topological rank
+    print("Computing topological sort rank...")
+    try:
+        topo_list = list(nx.topological_sort(graph))
+    except nx.NetworkXUnfeasible:
+        print("Warning: Cycle detected. Using imprecise sort.")
+        topo_list = list(graph.nodes())
+
+    # Map Node -> Rank (index in the topological order)
+    topo_rank = {node: i for i, node in enumerate(topo_list)}
+
+    # 3. Stream the submission
+    # Estimate the number of input lines for the progress bar.
+    try:
+        # A quick file-size check could give a better estimate;
+        # for now just assume a large file (~112M lines).
+        est_lines = 112000000
+    except OSError:
+        est_lines = 0
+
+    print(f"Streaming {args.input_file} -> {args.output_file}...")
+
+    with open(args.input_file, 'r') as fin, open(args.output_file, 'w', newline='') as fout:
+        reader = csv.reader(fin, delimiter='\t')
+        writer = csv.writer(fout, delimiter='\t')
+
+        current_protein = None
+        current_entries = []
+        count = 0
+
+        pbar = tqdm(total=est_lines, unit="line")
+
+        for row in reader:
+            pbar.update(1)
+            if not row: continue
+
+            # Submission format: ProteinID, GO, Score.
+            # Duplicate (protein, term) lines are handled in process_single_protein.
+            p_id, go_id, score_str = row[0], row[1], row[2]
+            try:
+                score = float(score_str)
+            except ValueError:
+                continue  # Skip header or bad lines
+
+            if p_id != current_protein:
+                if current_protein is not None:
+                    # Process the completed protein block.
+                    res_dict = process_single_protein(current_entries, parents_map, ancestors_map, topo_rank, args.alpha, args.power)
+                    # Write the results.
+                    # Filter low scores: CAFA submissions usually keep the top N terms
+                    # or those above a threshold; here everything above 0.001 is kept.
+                    for term, val in res_dict.items():
+                        if val > 0.001:  # Optimization: drop very low scores
+                            writer.writerow([current_protein, term, f"{val:.6f}"])
+                    count += 1
+
+                current_protein = p_id
+                current_entries = []
+
+            current_entries.append((go_id, score))
+
+        # Last protein
+        if current_protein is not None:
+            res_dict = process_single_protein(current_entries, parents_map, ancestors_map, topo_rank, args.alpha, args.power)
+            for term, val in res_dict.items():
+                if val > 0.001:
+                    writer.writerow([current_protein, term, f"{val:.6f}"])
+            count += 1
+
+    pbar.close()
+    print(f"Finished. Processed {count} unique proteins.")
+
+if __name__ == "__main__":
+    main()
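For reference, the script above would presumably be invoked along the lines of python process_submission.py raw_predictions.tsv ta_esm2_submission.tsv --obo go-basic.obo --alpha 0.7 --power 2.0, where the positional arguments and the defaults follow directly from its argparse setup (the raw-predictions file name here is only a placeholder). The snippet below is a minimal sketch, not part of the committed file: it replays the per-protein arithmetic of process_single_protein on a made-up three-term chain (leaf -> mid -> root) with invented scores, so the effect of the max-propagation, min-parent damping, and power-scaling steps can be checked by hand.

# Illustration only (not from process_submission.py): a hand-built three-term
# chain with invented GO IDs and scores, replaying the same arithmetic.
import math

parents_map = {"GO:LEAF": ["GO:MID"], "GO:MID": ["GO:ROOT"], "GO:ROOT": []}
ancestors_map = {"GO:LEAF": {"GO:MID", "GO:ROOT"}, "GO:MID": {"GO:ROOT"}, "GO:ROOT": set()}
alpha, power = 0.7, 2.0

# Raw predictions for one protein: only the leaf and the root were scored.
scores = {"GO:LEAF": 0.9, "GO:ROOT": 0.2}

# Expand to ancestors so every parent is tracked.
for term in list(scores):
    for anc in ancestors_map[term]:
        scores.setdefault(anc, 0.0)

# Child-to-parent order for this toy chain (leaf first, root last).
order = ["GO:LEAF", "GO:MID", "GO:ROOT"]

# Step 1: positive propagation, each parent takes the max of itself and its children.
for node in order:
    for parent in parents_map[node]:
        scores[parent] = max(scores[parent], scores[node])
# -> GO:MID and GO:ROOT are lifted to 0.9

# Step 2: negative propagation, each child is damped toward its weakest parent.
for node in reversed(order):
    if parents_map[node]:
        min_parent = min(scores[p] for p in parents_map[node])
        scores[node] *= min_parent * alpha + (1.0 - alpha)
# -> GO:MID: 0.9 * (0.9*0.7 + 0.3) = 0.837; GO:LEAF is damped against the updated GO:MID

# Step 3: power scaling, then drop zeros (the script additionally drops values <= 0.001).
final = {t: math.pow(s, power) for t, s in scores.items() if s > 0}
print({t: round(v, 4) for t, v in final.items()})
# {'GO:LEAF': 0.6357, 'GO:ROOT': 0.81, 'GO:MID': 0.7006}

The same pattern scales to the full GO DAG in the script because each pass only touches the sparse set of terms active for one protein.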
ta_esm2_submission.tsv
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fd08943628399e93cdfb721f2b322462fea526525a87fcec7a4d5de71a18d67
+size 2924941000