Datasets:
Formats:
parquet
Languages:
English
Size:
10M - 100M
Tags:
biology
chemistry
drug-discovery
clinical-trials
protein-protein-interaction
gene-essentiality
License:
File size: 6,332 Bytes
#!/usr/bin/env python3
"""Export all LLM benchmark prompt templates to a reproducibility appendix.
Reads templates from src/negbiodb/llm_prompts.py and the judge rubric from
src/negbiodb/llm_eval.py, then writes docs/appendix_prompts.md.
Usage:
python scripts/export_prompt_appendix.py
"""
from pathlib import Path
from negbiodb.llm_eval import L3_JUDGE_PROMPT
from negbiodb.llm_prompts import (
L1_ANSWER_FORMAT,
L1_FEW_SHOT,
L1_ZERO_SHOT,
L2_FEW_SHOT,
L2_ZERO_SHOT,
L3_FEW_SHOT,
L3_ZERO_SHOT,
L4_ANSWER_FORMAT,
L4_FEW_SHOT,
L4_ZERO_SHOT,
SYSTEM_PROMPT,
)
# Repository root, resolved from this script's location (scripts/ -> root).
PROJECT_ROOT = Path(__file__).resolve().parent.parent
# Destination for the generated reproducibility appendix.
OUTPUT = PROJECT_ROOT / "docs" / "appendix_prompts.md"
def code_block(text: str, lang: str = "") -> str:
    """Return *text* wrapped in a Markdown fenced code block.

    Args:
        text: Content to place between the fences, reproduced verbatim.
        lang: Optional language tag appended to the opening fence.

    Returns:
        The fenced block as a single string.
    """
    fence = "```"
    return "\n".join((fence + lang, text, fence))
def main():
    """Generate docs/appendix_prompts.md from the benchmark prompt templates.

    Assembles a Markdown appendix that reproduces every prompt template
    verbatim, writes it to OUTPUT, then runs a completeness check that a
    distinctive snippet from each template appears in the generated text.
    """
    sections = []
    sections.append("# Appendix A: LLM Benchmark Prompt Templates\n")
    sections.append(
        "This appendix documents all prompt templates used in the NegBioDB "
        "LLM benchmark (tasks L1--L4). Templates are reproduced verbatim "
        "from `src/negbiodb/llm_prompts.py` and `src/negbiodb/llm_eval.py`.\n"
    )
    # A.1 System Prompt
    sections.append("## A.1 System Prompt (Shared Across All Tasks)\n")
    sections.append(code_block(SYSTEM_PROMPT))
    sections.append("")
    # A.2 L1
    sections.append("## A.2 L1: Activity Classification (Multiple Choice)\n")
    sections.append("### A.2.1 Zero-Shot Template\n")
    sections.append(code_block(L1_ZERO_SHOT))
    sections.append("")
    sections.append("### A.2.2 Few-Shot Template\n")
    sections.append(code_block(L1_FEW_SHOT))
    sections.append("")
    sections.append("### A.2.3 Answer Format Instruction\n")
    sections.append(code_block(L1_ANSWER_FORMAT))
    sections.append(
        "\nThe answer format instruction is appended after both zero-shot "
        "and few-shot templates.\n"
    )
    # A.3 L2
    sections.append("## A.3 L2: Structured Extraction\n")
    sections.append("### A.3.1 Zero-Shot Template\n")
    sections.append(code_block(L2_ZERO_SHOT))
    sections.append("")
    sections.append("### A.3.2 Few-Shot Template\n")
    sections.append(code_block(L2_FEW_SHOT))
    sections.append(
        "\nFew-shot examples include the abstract text and the corresponding "
        "gold extraction in JSON format, separated by `---` delimiters.\n"
    )
    # A.4 L3
    sections.append("## A.4 L3: Scientific Reasoning\n")
    sections.append("### A.4.1 Zero-Shot Template\n")
    sections.append(code_block(L3_ZERO_SHOT))
    sections.append("")
    sections.append("### A.4.2 Few-Shot Template\n")
    sections.append(code_block(L3_FEW_SHOT))
    sections.append("")
    sections.append("### A.4.3 LLM-as-Judge Rubric\n")
    sections.append(
        "Responses are evaluated by a judge model (Gemini 2.5 Flash-Lite) "
        "using the following rubric:\n"
    )
    sections.append(code_block(L3_JUDGE_PROMPT))
    sections.append(
        "\nThe judge returns scores as JSON with four dimensions "
        "(accuracy, reasoning, completeness, specificity), each rated 1--5.\n"
    )
    # A.5 L4
    sections.append("## A.5 L4: Tested vs Untested Discrimination\n")
    sections.append("### A.5.1 Zero-Shot Template\n")
    sections.append(code_block(L4_ZERO_SHOT))
    sections.append("")
    sections.append("### A.5.2 Few-Shot Template\n")
    sections.append(code_block(L4_FEW_SHOT))
    sections.append("")
    sections.append("### A.5.3 Answer Format Instruction\n")
    sections.append(code_block(L4_ANSWER_FORMAT))
    sections.append(
        "\nThe answer format instruction is appended after both zero-shot "
        "and few-shot templates.\n"
    )
    # A.6 Model Configuration
    sections.append("## A.6 Model Configuration\n")
    sections.append("| Parameter | Value |")
    sections.append("|-----------|-------|")
    sections.append("| Temperature | 0.0 (deterministic) |")
    sections.append("| Max output tokens | 1024 (L1/L4), 2048 (L2/L3) |")
    sections.append("| Few-shot sets | 3 independent sets (fs0, fs1, fs2) |")
    sections.append("| Retry policy | Exponential backoff, max 8 retries |")
    sections.append("")
    sections.append("### Models\n")
    sections.append("| Model | Provider | Inference |")
    sections.append("|-------|----------|-----------|")
    sections.append("| Llama-3.3-70B-Instruct-AWQ | vLLM | Local (A100 GPU) |")
    sections.append("| Qwen2.5-32B-Instruct-AWQ | vLLM | Local (A100 GPU) |")
    sections.append("| Mistral-7B-Instruct-v0.3 | vLLM | Local (A100 GPU) |")
    sections.append("| GPT-4o-mini | OpenAI API | Cloud |")
    sections.append("| Gemini 2.5 Flash | Google Gemini API | Cloud |")
    sections.append("| Gemini 2.5 Flash-Lite | Google Gemini API | Cloud |")
    sections.append("")
    sections.append(
        "Gemini 2.5 Flash uses `thinkingConfig: {thinkingBudget: 0}` to "
        "disable internal reasoning tokens and ensure the full output budget "
        "is available for the response.\n"
    )
    # Write output. UTF-8 explicitly: the templates may contain non-ASCII
    # characters, and Path.write_text otherwise uses the platform's locale
    # encoding, which is not portable.
    OUTPUT.parent.mkdir(parents=True, exist_ok=True)
    text = "\n".join(sections)
    OUTPUT.write_text(text, encoding="utf-8")
    print(f"Written: {OUTPUT}")
    print(f" Lines: {len(text.splitlines())}")
    # Verify completeness: each template must contribute a distinctive
    # snippet to the output. NOTE(review): "{context}" appears in several
    # templates, so the L1/L4 zero-shot checks cannot distinguish which
    # template supplied it — consider more specific snippets.
    checks = {
        "SYSTEM_PROMPT": SYSTEM_PROMPT[:40],
        "L1_ZERO_SHOT": "{context}",
        "L1_FEW_SHOT": "examples of drug-target interaction classification",
        "L1_ANSWER_FORMAT": "Respond with ONLY the letter",
        "L2_ZERO_SHOT": "Extract all negative drug-target interaction",
        "L2_FEW_SHOT": "extract from this abstract",
        "L3_ZERO_SHOT": "Structural compatibility",
        "L3_FEW_SHOT": "examples of scientific reasoning",
        "L3_JUDGE_PROMPT": "Rate the following scientific explanation",
        "L4_ZERO_SHOT": "{context}",
        "L4_FEW_SHOT": "tested/untested compound-target pair",
        "L4_ANSWER_FORMAT": "tested' or 'untested'",
    }
    missing = [name for name, snippet in checks.items() if snippet not in text]
    if missing:
        print(f" WARNING: Missing templates: {missing}")
    else:
        # Derive the count from the checks dict so the message cannot
        # drift when templates are added or removed.
        print(f" All {len(checks)} templates included.")
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()