"""Tests for PPI LLM evaluation functions (src/negbiodb_ppi/llm_eval.py)."""

import json

import pytest

from negbiodb_ppi.llm_eval import (
    PPI_EVIDENCE_KEYWORDS,
    compute_all_ppi_llm_metrics,
    evaluate_ppi_l1,
    evaluate_ppi_l2,
    evaluate_ppi_l3,
    evaluate_ppi_l4,
    parse_ppi_l1_answer,
    parse_ppi_l2_response,
    parse_ppi_l3_judge_scores,
    parse_ppi_l4_answer,
)
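
# The four PPI evaluation levels, as these tests characterize them:
#   PPI-L1: 4-way multiple choice (A-D) over evidence categories
#   PPI-L2: structured JSON extraction of non-interacting protein pairs
#   PPI-L3: LLM-judge scores (1-5) on four PPI-specific dimensions
#   PPI-L4: tested/untested membership probes with a temporal contamination check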


# ── PPI-L1 Parser Tests ───────────────────────────────────────────────────
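# parse_ppi_l1_answer normalizes free-text model output ("Answer: D", "(D) ...",
# "A. ...", a bare letter in either case) down to a single letter A-D, or None
# when nothing matches.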


class TestParsePPIL1Answer:
    def test_single_letter_a_through_d(self):
        for letter in "ABCD":
            assert parse_ppi_l1_answer(letter) == letter

    def test_case_insensitive(self):
        assert parse_ppi_l1_answer("a") == "A"
        assert parse_ppi_l1_answer("d") == "D"

    def test_answer_colon_format(self):
        assert parse_ppi_l1_answer("Answer: D") == "D"
        assert parse_ppi_l1_answer("Answer: B") == "B"

    def test_answer_is_format(self):
        assert parse_ppi_l1_answer("The answer is C") == "C"

    def test_parenthesized(self):
        assert parse_ppi_l1_answer("(D) Database score") == "D"

    def test_letter_dot_format(self):
        assert parse_ppi_l1_answer("A. Direct experimental") == "A"

    def test_letter_with_explanation(self):
        assert parse_ppi_l1_answer("B\nThis was a systematic screen") == "B"

    def test_empty_returns_none(self):
        assert parse_ppi_l1_answer("") is None

    def test_no_valid_letter_returns_none(self):
        assert parse_ppi_l1_answer("I don't know the answer") is None

    def test_no_e_category(self):
        """PPI uses 4-way (A-D), E should fallback to None or other."""
        # "E" is not in _PPI_L1_LETTERS, so should return None
        assert parse_ppi_l1_answer("E") is None


# ── PPI-L1 Evaluator Tests ───────────────────────────────────────────────


class TestEvaluatePPIL1:
    def test_perfect_accuracy(self):
        preds = ["A", "B", "C", "D"]
        golds = ["A", "B", "C", "D"]
        result = evaluate_ppi_l1(preds, golds)
        assert result["accuracy"] == 1.0
        assert result["parse_rate"] == 1.0

    def test_zero_accuracy(self):
        preds = ["B", "A", "D", "C"]
        golds = ["A", "B", "C", "D"]
        result = evaluate_ppi_l1(preds, golds)
        assert result["accuracy"] == 0.0

    def test_per_class_accuracy(self):
        preds = ["A", "B", "C"]
        golds = ["A", "B", "D"]
        classes = ["intact", "huri", "string"]
        result = evaluate_ppi_l1(preds, golds, gold_classes=classes)
        assert "per_class_accuracy" in result
        assert result["per_class_accuracy"]["intact"] == 1.0
        assert result["per_class_accuracy"]["string"] == 0.0

    def test_per_difficulty_accuracy(self):
        preds = ["A", "A", "B", "B"]
        golds = ["A", "B", "B", "A"]
        diffs = ["easy", "easy", "hard", "hard"]
        result = evaluate_ppi_l1(preds, golds, difficulties=diffs)
        assert "per_difficulty_accuracy" in result
        assert result["per_difficulty_accuracy"]["easy"] == 0.5
        assert result["per_difficulty_accuracy"]["hard"] == 0.5

    def test_parse_failures(self):
        preds = ["A", "no valid response here at all", "C"]
        golds = ["A", "B", "C"]
        result = evaluate_ppi_l1(preds, golds)
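        # Accuracy is computed over parseable predictions only: the middle
        # response fails to parse, and both remaining answers are correct.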
        assert result["n_valid"] == 2
        assert result["parse_rate"] == pytest.approx(2 / 3)
        assert result["accuracy"] == 1.0

    def test_empty_predictions(self):
        result = evaluate_ppi_l1([], [])
        assert result["accuracy"] == 0.0
        assert result["n_total"] == 0

    def test_all_unparseable(self):
        preds = ["xyz", "hello", "???"]
        golds = ["A", "B", "C"]
        result = evaluate_ppi_l1(preds, golds)
        assert result["accuracy"] == 0.0
        assert result["n_valid"] == 0


# ── PPI-L2 Parser Tests ──────────────────────────────────────────────────
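# parse_ppi_l2_response accepts bare JSON, ```json code fences, and a JSON
# object embedded in surrounding prose; unparseable input yields None.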


class TestParsePPIL2Response:
    def test_valid_json(self):
        obj = {"non_interacting_pairs": [{"protein_1": "TP53", "protein_2": "CDK2"}],
               "total_negative_count": 1}
        result = parse_ppi_l2_response(json.dumps(obj))
        assert result["total_negative_count"] == 1

    def test_json_with_code_fences(self):
        raw = '```json\n{"non_interacting_pairs": [], "total_negative_count": 0}\n```'
        result = parse_ppi_l2_response(raw)
        assert result["total_negative_count"] == 0

    def test_json_embedded_in_text(self):
        raw = 'Here is the result: {"non_interacting_pairs": []} as expected.'
        result = parse_ppi_l2_response(raw)
        assert result is not None
        assert "non_interacting_pairs" in result

    def test_invalid_json(self):
        assert parse_ppi_l2_response("not json at all") is None


# ── PPI-L2 Evaluator Tests ───────────────────────────────────────────────


class TestEvaluatePPIL2:
    def test_perfect_entity_matching(self):
        pred = json.dumps({
            "non_interacting_pairs": [
                {"protein_1": "TP53", "protein_2": "CDK2", "method": "co-IP", "evidence_strength": "strong"},
            ],
            "total_negative_count": 1,
            "positive_interactions_mentioned": False,
        })
        gold = {
            "gold_extraction": {
                "non_interacting_pairs": [
                    {"protein_1": "TP53", "protein_2": "CDK2", "method": "co-IP", "evidence_strength": "strong"},
                ],
                "total_negative_count": 1,
                "positive_interactions_mentioned": False,
            }
        }
        result = evaluate_ppi_l2([pred], [gold])
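        # Pairs are matched on their protein names, so an identical pred/gold
        # pair yields perfect entity precision, recall, and F1.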
        assert result["entity_f1"] == 1.0
        assert result["schema_compliance"] == 1.0
        assert result["parse_rate"] == 1.0

    def test_missing_pair(self):
        pred = json.dumps({
            "non_interacting_pairs": [
                {"protein_1": "TP53", "protein_2": "CDK2"},
            ],
            "total_negative_count": 1,
            "positive_interactions_mentioned": False,
        })
        gold = {
            "gold_extraction": {
                "non_interacting_pairs": [
                    {"protein_1": "TP53", "protein_2": "CDK2"},
                    {"protein_1": "BRCA1", "protein_2": "ESR1"},
                ],
                "total_negative_count": 2,
                "positive_interactions_mentioned": False,
            }
        }
        result = evaluate_ppi_l2([pred], [gold])
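        # The BRCA1/ESR1 gold pair is missed (recall 1/2), while the single
        # predicted pair is correct (precision 1/1).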
        assert result["entity_recall"] == 0.5
        assert result["entity_precision"] == 1.0

    def test_count_accuracy(self):
        pred = json.dumps({
            "non_interacting_pairs": [],
            "total_negative_count": 3,
            "positive_interactions_mentioned": False,
        })
        gold = {"gold_extraction": {"non_interacting_pairs": [],
                "total_negative_count": 3, "positive_interactions_mentioned": False}}
        result = evaluate_ppi_l2([pred], [gold])
        assert result["count_accuracy"] == 1.0

    def test_parse_rate(self):
        preds = ['{"non_interacting_pairs": []}', "not json", '{"non_interacting_pairs": []}']
        golds = [
            {"gold_extraction": {"non_interacting_pairs": []}},
            {"gold_extraction": {"non_interacting_pairs": []}},
            {"gold_extraction": {"non_interacting_pairs": []}},
        ]
        result = evaluate_ppi_l2(preds, golds)
        assert result["parse_rate"] == pytest.approx(2 / 3)

    def test_method_accuracy_matching(self):
        """Method accuracy: substring match between predicted and gold method."""
        pred = json.dumps({
            "non_interacting_pairs": [
                {"protein_1": "TP53", "protein_2": "CDK2",
                 "method": "co-immunoprecipitation (co-IP) assay",
                 "evidence_strength": "strong"},
            ],
            "total_negative_count": 1,
            "positive_interactions_mentioned": False,
        })
        gold = {
            "gold_extraction": {
                "non_interacting_pairs": [
                    {"protein_1": "TP53", "protein_2": "CDK2",
                     "method": "co-immunoprecipitation (co-IP) assay",
                     "evidence_strength": "strong"},
                ],
                "total_negative_count": 1,
                "positive_interactions_mentioned": False,
            }
        }
        result = evaluate_ppi_l2([pred], [gold])
        assert result["method_accuracy"] == 1.0
        assert result["strength_accuracy"] == 1.0

    def test_method_accuracy_mismatch(self):
        """Mismatched method → method_accuracy=0."""
        pred = json.dumps({
            "non_interacting_pairs": [
                {"protein_1": "TP53", "protein_2": "CDK2",
                 "method": "co-fractionation proteomics",
                 "evidence_strength": "moderate"},
            ],
            "total_negative_count": 1,
            "positive_interactions_mentioned": False,
        })
        gold = {
            "gold_extraction": {
                "non_interacting_pairs": [
                    {"protein_1": "TP53", "protein_2": "CDK2",
                     "method": "binding assay",
                     "evidence_strength": "strong"},
                ],
                "total_negative_count": 1,
                "positive_interactions_mentioned": False,
            }
        }
        result = evaluate_ppi_l2([pred], [gold])
        assert result["method_accuracy"] == 0.0
        assert result["strength_accuracy"] == 0.0

    def test_method_accuracy_substring_match(self):
        """Substring match: gold 'binding assay' in pred 'affinity binding assay'."""
        pred = json.dumps({
            "non_interacting_pairs": [
                {"protein_1": "TP53", "protein_2": "CDK2",
                 "method": "affinity binding assay"},
            ],
            "total_negative_count": 1,
        })
        gold = {
            "gold_extraction": {
                "non_interacting_pairs": [
                    {"protein_1": "TP53", "protein_2": "CDK2",
                     "method": "binding assay"},
                ],
                "total_negative_count": 1,
            }
        }
        result = evaluate_ppi_l2([pred], [gold])
        assert result["method_accuracy"] == 1.0

    def test_method_accuracy_unmatched_pairs_not_counted(self):
        """Method accuracy only counts matched pairs (by protein names)."""
        pred = json.dumps({
            "non_interacting_pairs": [
                {"protein_1": "UNKNOWN1", "protein_2": "UNKNOWN2",
                 "method": "wrong method"},
            ],
            "total_negative_count": 1,
        })
        gold = {
            "gold_extraction": {
                "non_interacting_pairs": [
                    {"protein_1": "TP53", "protein_2": "CDK2",
                     "method": "binding assay"},
                ],
                "total_negative_count": 1,
            }
        }
        result = evaluate_ppi_l2([pred], [gold])
        # No matched pairs → 0/0 → 0.0
        assert result["method_accuracy"] == 0.0

    def test_empty_predictions(self):
        result = evaluate_ppi_l2([], [])
        assert result["n_total"] == 0


# ── PPI-L3 Judge Score Parser Tests ───────────────────────────────────────


class TestParsePPIL3JudgeScores:
    def test_valid_scores(self):
        resp = json.dumps({
            "biological_plausibility": 4,
            "structural_reasoning": 3,
            "mechanistic_completeness": 5,
            "specificity": 2,
        })
        scores = parse_ppi_l3_judge_scores(resp)
        assert scores == {
            "biological_plausibility": 4.0,
            "structural_reasoning": 3.0,
            "mechanistic_completeness": 5.0,
            "specificity": 2.0,
        }

    def test_out_of_range(self):
        resp = json.dumps({
            "biological_plausibility": 6,
            "structural_reasoning": 0,
            "mechanistic_completeness": 3,
            "specificity": 3,
        })
        scores = parse_ppi_l3_judge_scores(resp)
        assert scores is None  # 6 and 0 are out of range

    def test_missing_dimension(self):
        resp = json.dumps({
            "biological_plausibility": 4,
            "structural_reasoning": 3,
            "mechanistic_completeness": 5,
        })
        scores = parse_ppi_l3_judge_scores(resp)
        assert scores is None  # specificity missing

    def test_invalid_json(self):
        scores = parse_ppi_l3_judge_scores("not json")
        assert scores is None

    def test_ppi_specific_dimensions(self):
        """PPI L3 uses different dimensions than CT L3."""
        resp = json.dumps({
            "biological_plausibility": 4,
            "structural_reasoning": 3,
            "mechanistic_completeness": 5,
            "specificity": 2,
        })
        scores = parse_ppi_l3_judge_scores(resp)
        assert "biological_plausibility" in scores
        assert "structural_reasoning" in scores


# ── PPI-L3 Evaluator Tests ───────────────────────────────────────────────


class TestEvaluatePPIL3:
    def test_aggregation(self):
        scores = [
            {"biological_plausibility": 4.0, "structural_reasoning": 3.0,
             "mechanistic_completeness": 5.0, "specificity": 2.0},
            {"biological_plausibility": 2.0, "structural_reasoning": 5.0,
             "mechanistic_completeness": 3.0, "specificity": 4.0},
        ]
        result = evaluate_ppi_l3(scores)
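        # Per-dimension means average across items, e.g. biological_plausibility
        # = (4 + 2) / 2 = 3.0; "overall" pools the four dimensions, giving 3.5.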
        assert result["biological_plausibility"]["mean"] == pytest.approx(3.0)
        assert result["structural_reasoning"]["mean"] == pytest.approx(4.0)
        assert result["overall"]["mean"] == pytest.approx(3.5)
        assert result["n_valid"] == 2

    def test_none_handling(self):
        scores = [
            {"biological_plausibility": 4.0, "structural_reasoning": 3.0,
             "mechanistic_completeness": 5.0, "specificity": 2.0},
            None,
        ]
        result = evaluate_ppi_l3(scores)
        assert result["n_valid"] == 1
        assert result["n_total"] == 2

    def test_all_none(self):
        result = evaluate_ppi_l3([None, None])
        assert result["n_valid"] == 0
        assert result["biological_plausibility"]["mean"] == 0.0

    def test_empty(self):
        result = evaluate_ppi_l3([])
        assert result["n_valid"] == 0


# ── PPI-L4 Parser Tests ──────────────────────────────────────────────────
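# parse_ppi_l4_answer returns an (answer, evidence) tuple: the verdict is
# normalized to "tested"/"untested" (including "not tested", "not been tested",
# and "never been tested" phrasings), with any remaining text kept as the
# cited evidence (None when absent).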


class TestParsePPIL4Answer:
    def test_tested(self):
        answer, evidence = parse_ppi_l4_answer("tested\nIntAct curated non-interaction")
        assert answer == "tested"
        assert "IntAct" in evidence

    def test_untested(self):
        answer, evidence = parse_ppi_l4_answer("untested\nNo interaction databases found")
        assert answer == "untested"
        assert "No interaction" in evidence

    def test_not_tested_variant(self):
        answer, _ = parse_ppi_l4_answer("not tested\nReasoning...")
        assert answer == "untested"

    def test_not_been_tested_variant(self):
        answer, _ = parse_ppi_l4_answer("This pair has not been tested\nEvidence...")
        assert answer == "untested"

    def test_never_been_tested_variant(self):
        answer, _ = parse_ppi_l4_answer("This pair has never been tested in any study.")
        assert answer == "untested"

    def test_no_evidence(self):
        answer, evidence = parse_ppi_l4_answer("tested")
        assert answer == "tested"
        assert evidence is None

    def test_empty(self):
        answer, evidence = parse_ppi_l4_answer("")
        assert answer is None
        assert evidence is None


# ── PPI-L4 Evaluator Tests ───────────────────────────────────────────────


class TestEvaluatePPIL4:
    def test_perfect_accuracy(self):
        preds = ["tested\nIntAct", "untested\nNo data"]
        golds = ["tested", "untested"]
        result = evaluate_ppi_l4(preds, golds)
        assert result["accuracy"] == 1.0

    def test_temporal_pre_2015_post_2020(self):
        """PPI uses pre_2015/post_2020 (not CT's pre_2020/post_2023)."""
        preds = ["tested\nIntAct", "tested\nHuRI", "untested\nNone", "tested\nBioGRID"]
        golds = ["tested", "tested", "untested", "untested"]
        temporal = ["pre_2015", "post_2020", "pre_2015", "post_2020"]
        result = evaluate_ppi_l4(preds, golds, temporal_groups=temporal)
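        # pre_2015 covers items 0 and 2 (both correct → 1.0); post_2020 covers
        # items 1 and 3 (one correct → 0.5).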
        assert result["accuracy_pre_2015"] == 1.0
        assert result["accuracy_post_2020"] == 0.5

    def test_contamination_flag(self):
        """Flag when pre_2015 accuracy exceeds post_2020 by >15%."""
        preds = ["tested\nA", "tested\nB", "tested\nC", "untested\nD",
                  "tested\nE", "tested\nF", "tested\nG", "tested\nH"]
        golds = ["tested", "tested", "tested", "untested",
                  "untested", "untested", "untested", "untested"]
        temporal = ["pre_2015", "pre_2015", "pre_2015", "pre_2015",
                     "post_2020", "post_2020", "post_2020", "post_2020"]
        result = evaluate_ppi_l4(preds, golds, temporal)
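        # pre_2015 accuracy is 4/4 = 1.0 vs post_2020 at 0/4 = 0.0; the gap of
        # 1.0 clears the 15-point threshold, so the flag is raised.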
        assert result["contamination_flag"] is True
        assert result["contamination_gap"] == pytest.approx(1.0)

    def test_no_contamination(self):
        preds = ["tested\nA", "untested\nB"]
        golds = ["tested", "untested"]
        temporal = ["pre_2015", "post_2020"]
        result = evaluate_ppi_l4(preds, golds, temporal)
        assert result["contamination_flag"] is False

    def test_evidence_citation_rate_and_logic(self):
        """Evidence needs BOTH >50 chars AND domain keyword (AND logic)."""
        preds = [
            # >50 chars AND contains "intact" → pass
            "tested\nThis pair was tested in IntAct database using a co-immunoprecipitation assay with strong results",
            # >50 chars but NO keyword → fail
            "tested\nI think this pair was definitely tested somewhere in some large study with lots of results",
            # <50 chars but has keyword → fail
            "tested\nIntAct co-IP",
            # >50 chars AND keyword → pass
            "tested\nThe proteins were tested via yeast two-hybrid screening in the HuRI project with multiple replicates",
        ]
        golds = ["tested", "tested", "tested", "tested"]
        result = evaluate_ppi_l4(preds, golds)
        # Only 2 of 4 pass both conditions
        assert result["evidence_citation_rate"] == pytest.approx(0.5)

    def test_ppi_evidence_keywords(self):
        """PPI-specific keywords differ from CT and DTI."""
        assert "intact" in PPI_EVIDENCE_KEYWORDS
        assert "huri" in PPI_EVIDENCE_KEYWORDS
        assert "biogrid" in PPI_EVIDENCE_KEYWORDS
        assert "co-ip" in PPI_EVIDENCE_KEYWORDS
        assert "two-hybrid" in PPI_EVIDENCE_KEYWORDS
        assert "pulldown" in PPI_EVIDENCE_KEYWORDS
        # CT/DTI keywords should NOT be here
        assert "nct" not in PPI_EVIDENCE_KEYWORDS
        assert "chembl" not in PPI_EVIDENCE_KEYWORDS

    def test_empty(self):
        result = evaluate_ppi_l4([], [])
        assert result["accuracy"] == 0.0


# ── Dispatch Tests ───────────────────────────────────────────────────────
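# compute_all_ppi_llm_metrics dispatches on the task name ("ppi-l1" through
# "ppi-l4"), unpacking gold answers and grouping metadata from the gold dicts;
# unprefixed names such as "l1" raise a ValueError mentioning "Unknown task".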


class TestDispatch:
    def test_ppi_l1_dispatch(self):
        preds = ["A", "B"]
        gold = [{"gold_answer": "A", "gold_category": "intact", "difficulty": "easy"},
                {"gold_answer": "B", "gold_category": "huri", "difficulty": "hard"}]
        result = compute_all_ppi_llm_metrics("ppi-l1", preds, gold)
        assert result["accuracy"] == 1.0

    def test_ppi_l4_dispatch(self):
        preds = ["tested\nIntAct", "untested\nNone"]
        gold = [{"gold_answer": "tested", "temporal_group": "pre_2015"},
                {"gold_answer": "untested", "temporal_group": "post_2020"}]
        result = compute_all_ppi_llm_metrics("ppi-l4", preds, gold)
        assert result["accuracy"] == 1.0

    def test_invalid_task_raises(self):
        with pytest.raises(ValueError, match="Unknown task"):
            compute_all_ppi_llm_metrics("l1", ["A"], [{"gold_answer": "A"}])

    def test_ppi_l2_dispatch(self):
        pred_obj = {"non_interacting_pairs": [{"protein_1": "X", "protein_2": "Y"}],
                     "total_negative_count": 1, "positive_interactions_mentioned": False}
        preds = [json.dumps(pred_obj)]
        gold = [{"gold_extraction": {"non_interacting_pairs": [{"protein_1": "X", "protein_2": "Y"}],
                 "total_negative_count": 1, "positive_interactions_mentioned": False}}]
        result = compute_all_ppi_llm_metrics("ppi-l2", preds, gold)
        assert result["entity_f1"] == 1.0

    def test_ppi_l3_dispatch(self):
        resp = json.dumps({
            "biological_plausibility": 4,
            "structural_reasoning": 3,
            "mechanistic_completeness": 5,
            "specificity": 2,
        })
        result = compute_all_ppi_llm_metrics("ppi-l3", [resp], [{}])
        assert result["n_valid"] == 1