Datasets:
Formats:
parquet
Languages:
English
Size:
10M - 100M
Tags:
biology
chemistry
drug-discovery
clinical-trials
protein-protein-interaction
gene-essentiality
License:
Commit ·
6d1bbc7
0
Parent(s):
NegBioDB final: 4 domains, fully audited
Browse files. This view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +60 -0
- .gitignore +58 -0
- LICENSE +27 -0
- Makefile +180 -0
- PATCH_NOTES_2026-03-10.md +330 -0
- PROJECT_OVERVIEW.md +187 -0
- README.md +403 -0
- ROADMAP.md +561 -0
- config.yaml +213 -0
- docs/appendix_prompts.md +180 -0
- docs/methodology_notes.md +38 -0
- experiment_results.md +181 -0
- migrations/001_initial_schema.sql +241 -0
- migrations/002_target_variants.sql +30 -0
- migrations_ct/001_ct_initial_schema.sql +258 -0
- migrations_ct/002_schema_fixes.sql +48 -0
- migrations_depmap/001_ge_initial_schema.sql +221 -0
- migrations_ppi/001_ppi_initial_schema.sql +178 -0
- migrations_ppi/002_llm_annotations.sql +18 -0
- paper/appendix/app_checklist.tex +1 -0
- paper/appendix/app_contamination.tex +104 -0
- paper/appendix/app_croissant.tex +52 -0
- paper/appendix/app_datasheet.tex +119 -0
- paper/appendix/app_l3_analysis.tex +53 -0
- paper/appendix/app_llm_tables.tex +279 -0
- paper/appendix/app_ml_tables.tex +214 -0
- paper/appendix/app_prompts.tex +258 -0
- paper/appendix/app_schema.tex +139 -0
- paper/appendix/app_splits.tex +56 -0
- paper/appendix/appendix_main.tex +12 -0
- paper/checklist.tex +85 -0
- paper/main.tex +48 -0
- paper/neurips_2026.sty +437 -0
- paper/neurips_2026.tex +493 -0
- paper/references.bib +463 -0
- paper/scripts/generate_figures.py +343 -0
- paper/sections/abstract.tex +3 -0
- paper/sections/benchmark.tex +40 -0
- paper/sections/database.tex +53 -0
- paper/sections/discussion.tex +17 -0
- paper/sections/experiments.tex +125 -0
- paper/sections/introduction.tex +20 -0
- pyproject.toml +45 -0
- scripts/analyze_l4_contamination.py +95 -0
- scripts/build_compound_names.py +238 -0
- scripts/build_l1_dataset.py +845 -0
- scripts/build_l2_dataset.py +200 -0
- scripts/build_l3_dataset.py +234 -0
- scripts/build_l4_dataset.py +446 -0
- scripts/collect_llm_results.py +245 -0
.gitattributes
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.avro filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.mds filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
# Audio files - uncompressed
|
| 40 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 43 |
+
# Audio files - compressed
|
| 44 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 48 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 49 |
+
# Image files - uncompressed
|
| 50 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 51 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 52 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 53 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 54 |
+
# Image files - compressed
|
| 55 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 57 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 58 |
+
# Video files - compressed
|
| 59 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
*.webm filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Data directories (large, downloaded)
|
| 2 |
+
data/
|
| 3 |
+
exports/
|
| 4 |
+
|
| 5 |
+
# Experiment results & runtime logs (generated, large)
|
| 6 |
+
results/
|
| 7 |
+
logs/
|
| 8 |
+
|
| 9 |
+
# Database files
|
| 10 |
+
*.db
|
| 11 |
+
*.sqlite
|
| 12 |
+
*.sqlite3
|
| 13 |
+
|
| 14 |
+
# Python
|
| 15 |
+
__pycache__/
|
| 16 |
+
*.py[cod]
|
| 17 |
+
*$py.class
|
| 18 |
+
*.egg-info/
|
| 19 |
+
*.egg
|
| 20 |
+
dist/
|
| 21 |
+
build/
|
| 22 |
+
.eggs/
|
| 23 |
+
|
| 24 |
+
# Virtual environments
|
| 25 |
+
.venv/
|
| 26 |
+
venv/
|
| 27 |
+
|
| 28 |
+
# Environment variables
|
| 29 |
+
.env
|
| 30 |
+
|
| 31 |
+
# OS
|
| 32 |
+
.DS_Store
|
| 33 |
+
|
| 34 |
+
# IDE
|
| 35 |
+
.idea/
|
| 36 |
+
.vscode/
|
| 37 |
+
|
| 38 |
+
# Claude Code local settings (contain tool permissions and local paths)
|
| 39 |
+
.claude/
|
| 40 |
+
|
| 41 |
+
# Testing
|
| 42 |
+
.pytest_cache/
|
| 43 |
+
.coverage
|
| 44 |
+
htmlcov/
|
| 45 |
+
|
| 46 |
+
# Internal planning documents (not for public release)
|
| 47 |
+
research/
|
| 48 |
+
NeurIPS_DB_Track_Research.md
|
| 49 |
+
|
| 50 |
+
# Binary files (PDFs) — not suitable for git/HuggingFace datasets
|
| 51 |
+
paper/main.pdf
|
| 52 |
+
paper/*.aux
|
| 53 |
+
paper/*.bbl
|
| 54 |
+
paper/*.blg
|
| 55 |
+
paper/*.log
|
| 56 |
+
paper/*.out
|
| 57 |
+
paper/missfont.log
|
| 58 |
+
paper/figures/*.pdf
|
LICENSE
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2026 James Jang
|
| 4 |
+
|
| 5 |
+
You are free to:
|
| 6 |
+
Share — copy and redistribute the material in any medium or format
|
| 7 |
+
Adapt — remix, transform, and build upon the material for any purpose, even commercially
|
| 8 |
+
|
| 9 |
+
Under the following terms:
|
| 10 |
+
Attribution — You must give appropriate credit, provide a link to the license,
|
| 11 |
+
and indicate if changes were made. You may do so in any reasonable manner, but
|
| 12 |
+
not in any way that suggests the licensor endorses you or your use.
|
| 13 |
+
|
| 14 |
+
ShareAlike — If you remix, transform, or build upon the material, you must
|
| 15 |
+
distribute your contributions under the same license as the original.
|
| 16 |
+
|
| 17 |
+
No additional restrictions — You may not apply legal terms or technological
|
| 18 |
+
measures that legally restrict others from doing anything the license permits.
|
| 19 |
+
|
| 20 |
+
Full license text: https://creativecommons.org/licenses/by-sa/4.0/legalcode
|
| 21 |
+
|
| 22 |
+
---
|
| 23 |
+
|
| 24 |
+
Note on upstream licenses:
|
| 25 |
+
- ChEMBL (v36) data is CC BY-SA 3.0, whose ShareAlike condition allows derived works to be licensed under the later, compatible CC BY-SA 4.0 — hence this project's license choice.
|
| 26 |
+
- All other upstream sources (PubChem, AACT, HuRI, etc.) are Public Domain or
|
| 27 |
+
permissively licensed (CC BY, Apache 2.0, MIT), compatible with CC BY-SA 4.0.
|
Makefile
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NegBioDB Pipeline Makefile
|
| 2 |
+
|
| 3 |
+
.PHONY: setup db test clean \
|
| 4 |
+
ge-db ge-download-depmap ge-load-depmap ge-load-rnai ge-export ge-all ge-clean ge-test
|
| 5 |
+
|
| 6 |
+
# === Week 1: Scaffolding ===
|
| 7 |
+
|
| 8 |
+
setup:
|
| 9 |
+
uv venv
|
| 10 |
+
uv sync --all-extras
|
| 11 |
+
mkdir -p data exports
|
| 12 |
+
|
| 13 |
+
db: setup
|
| 14 |
+
uv run python -c "from negbiodb.db import create_database; create_database()"
|
| 15 |
+
|
| 16 |
+
test: setup
|
| 17 |
+
uv run pytest tests/ -v
|
| 18 |
+
|
| 19 |
+
clean:
|
| 20 |
+
rm -f data/negbiodb.db
|
| 21 |
+
rm -rf exports/* __pycache__ .pytest_cache
|
| 22 |
+
find . -name "*.pyc" -delete 2>/dev/null || true
|
| 23 |
+
|
| 24 |
+
# === Week 2: Data Download ===
|
| 25 |
+
|
| 26 |
+
.PHONY: download download-pubchem download-chembl download-bindingdb download-davis
|
| 27 |
+
|
| 28 |
+
download-pubchem: setup
|
| 29 |
+
uv run python scripts/download_pubchem.py
|
| 30 |
+
|
| 31 |
+
download-chembl: setup
|
| 32 |
+
uv run python scripts/download_chembl.py
|
| 33 |
+
|
| 34 |
+
download-bindingdb: setup
|
| 35 |
+
uv run python scripts/download_bindingdb.py
|
| 36 |
+
|
| 37 |
+
download-davis: setup
|
| 38 |
+
uv run python scripts/download_davis.py
|
| 39 |
+
|
| 40 |
+
download: download-pubchem download-chembl download-bindingdb download-davis
|
| 41 |
+
|
| 42 |
+
# === ETL: Load Sources ===
|
| 43 |
+
|
| 44 |
+
.PHONY: load-davis load-chembl load-pubchem load-bindingdb load-all
|
| 45 |
+
|
| 46 |
+
load-davis: db download-davis
|
| 47 |
+
uv run python scripts/load_davis.py
|
| 48 |
+
|
| 49 |
+
load-chembl: db download-chembl
|
| 50 |
+
uv run python scripts/load_chembl.py
|
| 51 |
+
|
| 52 |
+
load-pubchem: db download-pubchem
|
| 53 |
+
uv run python scripts/load_pubchem.py
|
| 54 |
+
|
| 55 |
+
load-bindingdb: db download-bindingdb
|
| 56 |
+
uv run python scripts/load_bindingdb.py
|
| 57 |
+
|
| 58 |
+
load-all: load-davis load-chembl load-pubchem load-bindingdb
|
| 59 |
+
|
| 60 |
+
# ============================================================
|
| 61 |
+
# Clinical Trial Failure Domain
|
| 62 |
+
# ============================================================
|
| 63 |
+
|
| 64 |
+
.PHONY: ct-db ct-download ct-load-aact ct-classify ct-resolve ct-outcomes ct-all ct-clean
|
| 65 |
+
|
| 66 |
+
ct-db: setup
|
| 67 |
+
uv run python -c "from negbiodb_ct.ct_db import create_ct_database; create_ct_database()"
|
| 68 |
+
|
| 69 |
+
ct-download-aact: setup
|
| 70 |
+
uv run python scripts_ct/download_aact.py
|
| 71 |
+
|
| 72 |
+
ct-download-opentargets: setup
|
| 73 |
+
uv run python scripts_ct/download_opentargets.py
|
| 74 |
+
|
| 75 |
+
ct-download-cto: setup
|
| 76 |
+
uv run python scripts_ct/download_cto.py
|
| 77 |
+
|
| 78 |
+
ct-download-shi-du: setup
|
| 79 |
+
uv run python scripts_ct/download_shi_du.py
|
| 80 |
+
|
| 81 |
+
ct-download: ct-download-opentargets ct-download-cto ct-download-shi-du
|
| 82 |
+
@echo "NOTE: AACT requires --url flag. Run: make ct-download-aact"
|
| 83 |
+
|
| 84 |
+
ct-load-aact: ct-db
|
| 85 |
+
uv run python scripts_ct/load_aact.py
|
| 86 |
+
|
| 87 |
+
ct-classify: ct-load-aact
|
| 88 |
+
uv run python scripts_ct/classify_failures.py
|
| 89 |
+
|
| 90 |
+
ct-resolve: ct-load-aact
|
| 91 |
+
uv run python scripts_ct/resolve_drugs.py
|
| 92 |
+
|
| 93 |
+
ct-outcomes: ct-classify
|
| 94 |
+
uv run python scripts_ct/load_outcomes.py
|
| 95 |
+
|
| 96 |
+
ct-all: ct-db ct-load-aact ct-classify ct-resolve ct-outcomes
|
| 97 |
+
@echo "CT pipeline complete."
|
| 98 |
+
|
| 99 |
+
ct-clean:
|
| 100 |
+
rm -f data/negbiodb_ct.db
|
| 101 |
+
@echo "CT database removed."
|
| 102 |
+
|
| 103 |
+
ct-test: setup
|
| 104 |
+
uv run pytest tests/test_ct_db.py tests/test_etl_aact.py tests/test_etl_classify.py tests/test_drug_resolver.py tests/test_etl_outcomes.py -v
|
| 105 |
+
|
| 106 |
+
# ============================================================
|
| 107 |
+
# Protein-Protein Interaction Negative Domain
|
| 108 |
+
# ============================================================
|
| 109 |
+
|
| 110 |
+
.PHONY: ppi-db ppi-download ppi-load-huri ppi-load-intact ppi-load-humap ppi-load-string ppi-all ppi-clean ppi-test
|
| 111 |
+
|
| 112 |
+
ppi-db: setup
|
| 113 |
+
uv run python -c "from negbiodb_ppi.ppi_db import create_ppi_database; create_ppi_database()"
|
| 114 |
+
|
| 115 |
+
ppi-download-huri: setup
|
| 116 |
+
uv run python scripts_ppi/download_huri.py
|
| 117 |
+
|
| 118 |
+
ppi-download-intact: setup
|
| 119 |
+
uv run python scripts_ppi/download_intact.py
|
| 120 |
+
|
| 121 |
+
ppi-download-humap: setup
|
| 122 |
+
uv run python scripts_ppi/download_humap.py
|
| 123 |
+
|
| 124 |
+
ppi-download-string: setup
|
| 125 |
+
uv run python scripts_ppi/download_string.py
|
| 126 |
+
|
| 127 |
+
ppi-download-biogrid: setup
|
| 128 |
+
uv run python scripts_ppi/download_biogrid.py
|
| 129 |
+
|
| 130 |
+
ppi-download: ppi-download-huri ppi-download-intact ppi-download-humap ppi-download-string ppi-download-biogrid
|
| 131 |
+
|
| 132 |
+
ppi-load-huri: ppi-db ppi-download-huri
|
| 133 |
+
uv run python scripts_ppi/load_huri.py
|
| 134 |
+
|
| 135 |
+
ppi-load-intact: ppi-db ppi-download-intact
|
| 136 |
+
uv run python scripts_ppi/load_intact.py
|
| 137 |
+
|
| 138 |
+
ppi-load-humap: ppi-db ppi-download-humap
|
| 139 |
+
uv run python scripts_ppi/load_humap.py
|
| 140 |
+
|
| 141 |
+
ppi-load-string: ppi-db ppi-download-string
|
| 142 |
+
uv run python scripts_ppi/load_string.py
|
| 143 |
+
|
| 144 |
+
ppi-all: ppi-db ppi-load-huri ppi-load-intact ppi-load-humap ppi-load-string
|
| 145 |
+
@echo "PPI pipeline complete."
|
| 146 |
+
|
| 147 |
+
ppi-clean:
|
| 148 |
+
rm -f data/negbiodb_ppi.db
|
| 149 |
+
@echo "PPI database removed."
|
| 150 |
+
|
| 151 |
+
ppi-test: setup
|
| 152 |
+
uv run pytest tests/test_ppi_db.py tests/test_protein_mapper.py tests/test_etl_huri.py tests/test_etl_intact.py tests/test_etl_humap.py tests/test_etl_string.py -v
|
| 153 |
+
|
| 154 |
+
# === GE/DepMap Domain ===
|
| 155 |
+
|
| 156 |
+
ge-db:
|
| 157 |
+
uv run python -c "from negbiodb_depmap.depmap_db import init_db; init_db()"
|
| 158 |
+
@echo "GE database initialized."
|
| 159 |
+
|
| 160 |
+
ge-download-depmap:
|
| 161 |
+
uv run python scripts_depmap/download_depmap.py
|
| 162 |
+
|
| 163 |
+
ge-load-depmap: ge-db ge-download-depmap
|
| 164 |
+
uv run python scripts_depmap/load_depmap.py
|
| 165 |
+
|
| 166 |
+
ge-load-rnai: ge-db ge-download-depmap
|
| 167 |
+
uv run python scripts_depmap/load_rnai.py
|
| 168 |
+
|
| 169 |
+
ge-export: ge-db
|
| 170 |
+
uv run python scripts_depmap/export_ge_ml_dataset.py
|
| 171 |
+
|
| 172 |
+
ge-all: ge-db ge-load-depmap ge-load-rnai ge-export
|
| 173 |
+
@echo "GE pipeline complete."
|
| 174 |
+
|
| 175 |
+
ge-clean:
|
| 176 |
+
rm -f data/negbiodb_depmap.db
|
| 177 |
+
@echo "GE database removed."
|
| 178 |
+
|
| 179 |
+
ge-test: setup
|
| 180 |
+
uv run pytest tests/test_ge_db.py tests/test_ge_features.py tests/test_etl_depmap.py tests/test_etl_rnai.py tests/test_ge_export.py -v
|
PATCH_NOTES_2026-03-10.md
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Patch Notes: Experiment Pipeline Fixes
|
| 2 |
+
|
| 3 |
+
Date: 2026-03-10
|
| 4 |
+
|
| 5 |
+
## Scope
|
| 6 |
+
|
| 7 |
+
This patch fixes experiment orchestration bugs found during review across:
|
| 8 |
+
|
| 9 |
+
- `src/negbiodb/export.py`
|
| 10 |
+
- `scripts/train_baseline.py`
|
| 11 |
+
- `scripts/prepare_exp_data.py`
|
| 12 |
+
- `scripts/collect_results.py`
|
| 13 |
+
- `tests/test_pipeline_scripts.py`
|
| 14 |
+
|
| 15 |
+
## Bugs Fixed
|
| 16 |
+
|
| 17 |
+
### 1. Split export row fanout with multiple split versions
|
| 18 |
+
|
| 19 |
+
Problem:
|
| 20 |
+
- `export_negative_dataset()` joined split columns by `split_strategy`.
|
| 21 |
+
- If `random_v2` or another later split definition existed for the same strategy,
|
| 22 |
+
the export query could duplicate rows.
|
| 23 |
+
|
| 24 |
+
Fix:
|
| 25 |
+
- Export now resolves a single latest `split_id` per strategy before joining
|
| 26 |
+
`split_assignments`.
|
| 27 |
+
- Cold split integrity checks were aligned to use the latest split definition too.
|
| 28 |
+
- Split selection now prefers explicit version semantics (`version` column and `_vN`
|
| 29 |
+
suffixes) rather than raw insertion order.
|
| 30 |
+
|
| 31 |
+
Impact:
|
| 32 |
+
- Prevents silent corruption of `negbiodb_dti_pairs.parquet` when new split versions are added.
|
| 33 |
+
|
| 34 |
+
### 2. Incomplete graph cache silently degrading GraphDTA/DrugBAN
|
| 35 |
+
|
| 36 |
+
Problem:
|
| 37 |
+
- Existing `graph_cache.pt` files could be incomplete.
|
| 38 |
+
- Missing SMILES fell through to placeholder graphs during batching.
|
| 39 |
+
- When no cache existed, train fold could save a train-only cache that val/test later reused.
|
| 40 |
+
|
| 41 |
+
Fix:
|
| 42 |
+
- Added `_prepare_graph_cache()` in `train_baseline.py`.
|
| 43 |
+
- The cache is now built or backfilled against the full dataset parquet before folds are created.
|
| 44 |
+
- `DTIDataset._load_graphs()` also backfills missing SMILES if a partial cache path is still passed in.
|
| 45 |
+
|
| 46 |
+
Impact:
|
| 47 |
+
- Prevents valid molecules from silently training/evaluating as dummy graphs.
|
| 48 |
+
|
| 49 |
+
### 3. Training run outputs overwriting each other
|
| 50 |
+
|
| 51 |
+
Problem:
|
| 52 |
+
- Output directories were keyed only by `model/split/negative`.
|
| 53 |
+
- Different datasets or seeds could overwrite prior runs.
|
| 54 |
+
|
| 55 |
+
Fix:
|
| 56 |
+
- Run names now include `dataset` and `seed`.
|
| 57 |
+
- Added `_build_run_name()` helper to keep naming deterministic and testable.
|
| 58 |
+
|
| 59 |
+
Impact:
|
| 60 |
+
- Balanced vs. realistic and multi-seed runs can now coexist safely.
|
| 61 |
+
|
| 62 |
+
### 3b. Unsupported realistic Exp 1 controls now fail fast
|
| 63 |
+
|
| 64 |
+
Problem:
|
| 65 |
+
- `uniform_random` and `degree_matched` controls did not have realistic dataset variants.
|
| 66 |
+
- The CLI still accepted `--dataset realistic` for those negatives, which mislabeled balanced-control runs as realistic.
|
| 67 |
+
|
| 68 |
+
Fix:
|
| 69 |
+
- Removed those unsupported mappings from `train_baseline.py`.
|
| 70 |
+
- The CLI now rejects realistic random-control combinations explicitly.
|
| 71 |
+
|
| 72 |
+
Impact:
|
| 73 |
+
- Prevents mislabeled Exp 1 results from being written to disk and summarized later.
|
| 74 |
+
|
| 75 |
+
### 4. Invalid DDB CLI combinations
|
| 76 |
+
|
| 77 |
+
Problem:
|
| 78 |
+
- `ddb` was exposed as a negative source even though it is a split mode.
|
| 79 |
+
- Commands like `--split random --negative ddb` were accepted.
|
| 80 |
+
|
| 81 |
+
Fix:
|
| 82 |
+
- Removed `ddb` from `--negative` choices.
|
| 83 |
+
- Added explicit validation so DDB is only valid for `split=ddb`, `negative=negbiodb`,
|
| 84 |
+
and `dataset=balanced`.
|
| 85 |
+
|
| 86 |
+
Impact:
|
| 87 |
+
- Reduces mislabeled experiment runs and ambiguous result directories.
|
| 88 |
+
|
| 89 |
+
### 5. `prepare_exp_data.py --skip-exp4` still required the pairs parquet
|
| 90 |
+
|
| 91 |
+
Problem:
|
| 92 |
+
- Input validation always required `exports/negbiodb_dti_pairs.parquet`,
|
| 93 |
+
even when Exp 4 was being skipped.
|
| 94 |
+
|
| 95 |
+
Fix:
|
| 96 |
+
- Conditionalized required inputs so the large pairs parquet is only required when Exp 4 runs.
|
| 97 |
+
- Added defensive deduplication and `many_to_one` merge validation in `prepare_exp4_ddb()`.
|
| 98 |
+
|
| 99 |
+
Impact:
|
| 100 |
+
- Exp 1 preparation can now run independently.
|
| 101 |
+
|
| 102 |
+
### 6. Result collection mixed datasets and hid seed information
|
| 103 |
+
|
| 104 |
+
Problem:
|
| 105 |
+
- `collect_results.py` dropped `dataset` and `seed` from Table 1 output.
|
| 106 |
+
- Exp 1 summary selected the first matching row instead of grouping safely.
|
| 107 |
+
|
| 108 |
+
Fix:
|
| 109 |
+
- Table output now preserves `dataset` and `seed`.
|
| 110 |
+
- Exp 1 summary is now grouped by dataset and averages only across seeds present in all compared negative conditions.
|
| 111 |
+
|
| 112 |
+
Impact:
|
| 113 |
+
- Prevents balanced/realistic runs from being conflated in downstream summaries.
|
| 114 |
+
|
| 115 |
+
### 6b. Result collection now supports explicit dataset/seed filters
|
| 116 |
+
|
| 117 |
+
Problem:
|
| 118 |
+
- `collect_results.py` previously aggregated every run under `results/baselines/`.
|
| 119 |
+
- As more experiments accumulate, paper tables can accidentally mix exploratory and final runs.
|
| 120 |
+
|
| 121 |
+
Fix:
|
| 122 |
+
- Added `--dataset` and repeatable `--seed` filters to `collect_results.py`.
|
| 123 |
+
- Added repeatable `--model`, `--split`, and `--negative` filters too.
|
| 124 |
+
- The command now fails fast if filters remove all rows.
|
| 125 |
+
- Added optional `--aggregate-seeds` output that writes `table1_aggregated.csv`
|
| 126 |
+
with mean/std over seeds.
|
| 127 |
+
- Added `table1_aggregated.md` with human-readable `mean +/- std` formatting.
|
| 128 |
+
|
| 129 |
+
Impact:
|
| 130 |
+
- Makes paper/report generation reproducible from an explicit run subset.
|
| 131 |
+
- Adds a paper-friendly seed-aggregated summary without removing access to raw runs.
|
| 132 |
+
|
| 133 |
+
### 7. SLURM submission metadata aligned with current run naming
|
| 134 |
+
|
| 135 |
+
Problem:
|
| 136 |
+
- SLURM job names and log file names did not include dataset or seed.
|
| 137 |
+
- Submission wrappers relied on implicit defaults instead of exporting `DATASET` and `SEED` explicitly.
|
| 138 |
+
|
| 139 |
+
Fix:
|
| 140 |
+
- Updated SLURM wrapper scripts to include dataset and seed in job/log naming.
|
| 141 |
+
- Submission wrappers now export `DATASET` and `SEED` explicitly to the training job.
|
| 142 |
+
- Training job logging now prints seed together with model/split/negative/dataset.
|
| 143 |
+
|
| 144 |
+
Impact:
|
| 145 |
+
- Makes cluster logs line up with the run directories and result-collection filters.
|
| 146 |
+
|
| 147 |
+
### 7b. SLURM submission scripts now support seed sweeps
|
| 148 |
+
|
| 149 |
+
Problem:
|
| 150 |
+
- Multi-seed experiments required manual resubmission or manual editing of shell scripts.
|
| 151 |
+
|
| 152 |
+
Fix:
|
| 153 |
+
- Added `SEEDS="42 43 44"` style support to SLURM submission wrappers.
|
| 154 |
+
- Added optional `MODELS`, `SPLITS`, and `NEGATIVES` filters to `submit_all.sh`.
|
| 155 |
+
- Default behavior remains `SEEDS=42`, preserving the prior single-seed workflow.
|
| 156 |
+
|
| 157 |
+
Impact:
|
| 158 |
+
- Makes it straightforward to launch reproducible seed sweeps that match `collect_results.py --seed ...`.
|
| 159 |
+
- Makes selective experiment submission possible without editing shell scripts.
|
| 160 |
+
|
| 161 |
+
### 7c. Added Cayuga SSH helper wrappers for remote submission and monitoring
|
| 162 |
+
|
| 163 |
+
Problem:
|
| 164 |
+
- The documented Cayuga workflow relies on SSH ControlMaster and non-interactive remote commands.
|
| 165 |
+
- Reconstructing the exact remote submit/monitor commands by hand is error-prone.
|
| 166 |
+
|
| 167 |
+
Fix:
|
| 168 |
+
- Added `slurm/remote_submit_cayuga.sh` to call `submit_all.sh` remotely via `ssh ${HPC_LOGIN:-cayuga-login1}`.
|
| 169 |
+
- Added `slurm/remote_monitor_cayuga.sh` to inspect `squeue` and recent log files remotely.
|
| 170 |
+
- Made SLURM binary paths overridable via `SBATCH_BIN` / `SQUEUE_BIN`.
|
| 171 |
+
|
| 172 |
+
Impact:
|
| 173 |
+
- Makes the repository align directly with the Cayuga SSH workflow documented in the lab notes.
|
| 174 |
+
|
| 175 |
+
## Regression Tests Added
|
| 176 |
+
|
| 177 |
+
Added `tests/test_pipeline_scripts.py` covering:
|
| 178 |
+
|
| 179 |
+
- export uses latest split version without row duplication
|
| 180 |
+
- `prepare_exp_data.py --skip-exp4` no longer requires the pairs parquet
|
| 181 |
+
- run names include dataset and seed
|
| 182 |
+
- invalid DDB dataset resolution is rejected
|
| 183 |
+
- unsupported realistic random-control runs are rejected
|
| 184 |
+
- `train_baseline.py` rejects `split=ddb` with `dataset=realistic`
|
| 185 |
+
- `train_baseline.py` writes results into a dataset+seed-qualified run directory
|
| 186 |
+
- graph cache backfills missing SMILES
|
| 187 |
+
- result collection preserves dataset/seed and groups Exp 1 summaries by dataset using matched seeds only
|
| 188 |
+
- result collection can filter by dataset and seed before building tables
|
| 189 |
+
- result collection can also filter by model, split, and negative source
|
| 190 |
+
- result collection can optionally write seed-aggregated tables
|
| 191 |
+
- result collection writes both aggregated CSV and aggregated Markdown summaries
|
| 192 |
+
- SLURM job/log names now include dataset and seed, matching run output naming
|
| 193 |
+
- SLURM submission wrappers support multi-seed sweeps via the `SEEDS` environment variable
|
| 194 |
+
- `submit_all.sh` can filter submitted experiments by model, split, and negative source
|
| 195 |
+
- Added remote Cayuga SSH helper scripts for submission and monitoring
|
| 196 |
+
|
| 197 |
+
## Verification
|
| 198 |
+
|
| 199 |
+
Commands run:
|
| 200 |
+
|
| 201 |
+
```bash
|
| 202 |
+
uv run pytest tests/test_export.py -q
|
| 203 |
+
uv run pytest tests/test_pipeline_scripts.py -q
|
| 204 |
+
```
|
| 205 |
+
|
| 206 |
+
Observed results:
|
| 207 |
+
|
| 208 |
+
- `tests/test_export.py`: 52 passed
|
| 209 |
+
- `tests/test_pipeline_scripts.py`: 17 passed, 1 skipped
|
| 210 |
+
|
| 211 |
+
Skip reason:
|
| 212 |
+
- the graph cache backfill test is skipped when `torch` is unavailable in the environment
|
| 213 |
+
|
| 214 |
+
## Follow-up
|
| 215 |
+
|
| 216 |
+
Recommended next step:
|
| 217 |
+
|
| 218 |
+
- add one end-to-end script test that exercises `train_baseline.py` CLI with heavy training
|
| 219 |
+
functions mocked out, to lock down argument validation and results writing behavior together
|
| 220 |
+
|
| 221 |
+
## Lessons Learned (2026-03-12)
|
| 222 |
+
|
| 223 |
+
### 1. Exp 4 must be defined as a full-task split, not a negative-only transform
|
| 224 |
+
|
| 225 |
+
What we learned:
|
| 226 |
+
- Treating DDB as "only reassign negatives while positives keep `split_random`" weakens the claim.
|
| 227 |
+
- That setup mixes split bias with class-specific handling and no longer measures the benchmark-level effect of node-degree bias.
|
| 228 |
+
|
| 229 |
+
Decision:
|
| 230 |
+
- Exp 4 is now defined as a **full-task degree-balanced split on merged M1 balanced data**.
|
| 231 |
+
- Positives and negatives must be reassigned together under the same `split_degree_balanced` policy.
|
| 232 |
+
|
| 233 |
+
Operational rule:
|
| 234 |
+
- Any future regeneration of `exports/negbiodb_m1_balanced_ddb.parquet` must be done from the merged M1 benchmark, not by patching only the negative subset.
|
| 235 |
+
|
| 236 |
+
### 2. Eval-only rewrites can make stale checkpoints look fresh
|
| 237 |
+
|
| 238 |
+
What we learned:
|
| 239 |
+
- Using `results.json` modification time alone is not a reliable freshness signal.
|
| 240 |
+
- `eval_checkpoint.py` can rewrite results for an old checkpoint and make a stale run appear newly generated.
|
| 241 |
+
|
| 242 |
+
Decision:
|
| 243 |
+
- Freshness checks must use training artifacts first (`best.pt`, `last.pt`, `training_log.csv`) and only fall back to `results.json` if no training artifacts exist.
|
| 244 |
+
|
| 245 |
+
Operational rule:
|
| 246 |
+
- Any future result filtering or release packaging should treat checkpoint/log timestamps as the source of truth for run freshness.
|
| 247 |
+
|
| 248 |
+
### 3. DDB benchmark regeneration invalidates prior DDB model results
|
| 249 |
+
|
| 250 |
+
What we learned:
|
| 251 |
+
- Once `exports/negbiodb_m1_balanced_ddb.parquet` changes, all prior `*_ddb_*` model results become semantically stale even if files still parse.
|
| 252 |
+
- Silent reuse of those results is more dangerous than having missing rows.
|
| 253 |
+
|
| 254 |
+
Decision:
|
| 255 |
+
- `collect_results.py` now excludes stale DDB runs by default when they are older than the current DDB parquet.
|
| 256 |
+
|
| 257 |
+
Operational rule:
|
| 258 |
+
- After any future DDB regeneration, immediately retrain the DDB model trio before producing paper tables.
|
| 259 |
+
|
| 260 |
+
### 4. Remote HPC execution can drift from local fixes
|
| 261 |
+
|
| 262 |
+
What we learned:
|
| 263 |
+
- The Cayuga working directory is effectively a deployed copy, not a git-controlled checkout.
|
| 264 |
+
- Local fixes do not automatically exist on the remote side, so "submitted successfully" does not imply "submitted the corrected code."
|
| 265 |
+
|
| 266 |
+
Decision:
|
| 267 |
+
- Before remote submission, explicitly sync changed code or regenerate artifacts in place on Cayuga.
|
| 268 |
+
- For large derived files, remote regeneration is usually safer than bulk transfer.
|
| 269 |
+
|
| 270 |
+
Operational rule:
|
| 271 |
+
- Prefer:
|
| 272 |
+
1. sync small code/script changes
|
| 273 |
+
2. regenerate derived artifacts on Cayuga
|
| 274 |
+
3. then submit jobs
|
| 275 |
+
|
| 276 |
+
### 5. Submission wrappers need shell syntax coverage, not just static review
|
| 277 |
+
|
| 278 |
+
What we learned:
|
| 279 |
+
- `slurm/submit_all.sh` still contained loop-closing bugs after functional edits.
|
| 280 |
+
- These did not surface until the exact submission path was exercised on the remote cluster.
|
| 281 |
+
|
| 282 |
+
Decision:
|
| 283 |
+
- Shell wrappers should be syntax-checked with `bash -n` whenever edited.
|
| 284 |
+
|
| 285 |
+
Operational rule:
|
| 286 |
+
- For any future SLURM wrapper change:
|
| 287 |
+
- run `bash -n <script>`
|
| 288 |
+
- then run one narrow submission command before treating the wrapper as good
|
| 289 |
+
|
| 290 |
+
## Future Reference
|
| 291 |
+
|
| 292 |
+
### Step 4 current ground truth
|
| 293 |
+
|
| 294 |
+
- Exp 4 = `split=ddb`, `negative=negbiodb`, `dataset=balanced`
|
| 295 |
+
- DDB parquet = `exports/negbiodb_m1_balanced_ddb.parquet`
|
| 296 |
+
- DDB parquet is generated from merged M1 balanced data with positives and negatives reassigned together
|
| 297 |
+
- Stale DDB results are excluded by default in `scripts/collect_results.py`
|
| 298 |
+
- Override flag if absolutely needed: `--allow-stale-ddb`
|
| 299 |
+
|
| 300 |
+
### Safe workflow for future DDB reruns
|
| 301 |
+
|
| 302 |
+
1. Sync changed code to Cayuga.
|
| 303 |
+
2. Regenerate DDB parquet on Cayuga:
|
| 304 |
+
- `source ${CONDA_PREFIX:-/path/to/conda}/miniconda3/etc/profile.d/conda.sh`
|
| 305 |
+
- `conda activate negbiodb-ml`
|
| 306 |
+
- `cd ${SCRATCH_DIR:-/path/to/scratch}/negbiodb`
|
| 307 |
+
- `python scripts/prepare_exp_data.py --skip-exp1`
|
| 308 |
+
3. Submit only DDB jobs:
|
| 309 |
+
- `SEEDS="42" MODELS="deepdta graphdta drugban" SPLITS="ddb" NEGATIVES="negbiodb" DATASETS="balanced" bash slurm/submit_all.sh`
|
| 310 |
+
4. Verify queue state:
|
| 311 |
+
- `/opt/ohpc/pub/software/slurm/24.05.2/bin/squeue -u ${USER}`
|
| 312 |
+
5. Re-collect tables after those runs finish.
|
| 313 |
+
|
| 314 |
+
### Current known-good DDB submission example
|
| 315 |
+
|
| 316 |
+
Successful Cayuga submission on 2026-03-12:
|
| 317 |
+
|
| 318 |
+
- `negbio_deepdta_balanced_ddb_negbiodb_seed42` → job `2702356`
|
| 319 |
+
- `negbio_graphdta_balanced_ddb_negbiodb_seed42` → job `2702357`
|
| 320 |
+
- `negbio_drugban_balanced_ddb_negbiodb_seed42` → job `2702358`
|
| 321 |
+
|
| 322 |
+
### Files to inspect first next time
|
| 323 |
+
|
| 324 |
+
- `scripts/prepare_exp_data.py`
|
| 325 |
+
- `src/negbiodb/export.py`
|
| 326 |
+
- `scripts/collect_results.py`
|
| 327 |
+
- `scripts/train_baseline.py`
|
| 328 |
+
- `scripts/eval_checkpoint.py`
|
| 329 |
+
- `slurm/submit_all.sh`
|
| 330 |
+
- `tests/test_pipeline_scripts.py`
|
PROJECT_OVERVIEW.md
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NegBioDB: Negative Results Database & Dual ML/LLM Benchmark
|
| 2 |
+
|
| 3 |
+
> Biology-first, science-extensible negative results database and dual ML+LLM benchmark
|
| 4 |
+
|
| 5 |
+
*Last updated: 2026-03-30*
|
| 6 |
+
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
## Project Vision
|
| 10 |
+
|
| 11 |
+
Approximately 90% of scientific experiments produce null or inconclusive results, yet the vast majority remain unpublished. This systematic gap fundamentally distorts AI/ML model training and evaluation.
|
| 12 |
+
|
| 13 |
+
**Goal:** Systematically collect and structure experimentally confirmed negative results across biomedical domains, and build benchmarks that quantify the impact of publication bias on AI/ML models.
|
| 14 |
+
|
| 15 |
+
## Why This Matters
|
| 16 |
+
|
| 17 |
+
1. **Publication Bias**: 85% of published papers report only positive results
|
| 18 |
+
2. **AI Model Bias**: Models trained without negative data produce excessive false positives
|
| 19 |
+
3. **Economic Waste**: Duplicated experiments, failed drug discovery pipelines (billions of dollars)
|
| 20 |
+
4. **Proven Impact**: Models trained with negative data are more accurate (Organic Letters 2023, bioRxiv 2024)
|
| 21 |
+
|
| 22 |
+
---
|
| 23 |
+
|
| 24 |
+
## Architecture
|
| 25 |
+
|
| 26 |
+
```
|
| 27 |
+
Four Biomedical Domains
|
| 28 |
+
┌────────────────────────────────────────────────────────────┐
|
| 29 |
+
│ NegBioDB │
|
| 30 |
+
│ DTI CT PPI GE │
|
| 31 |
+
│ (30.5M neg) (133K neg) (2.2M neg) (28.8M neg) │
|
| 32 |
+
│ ChEMBL+ AACT+ IntAct+ DepMap │
|
| 33 |
+
│ PubChem+ CTO+ HuRI+ CRISPR+RNAi │
|
| 34 |
+
│ BindingDB+ OpenTargets+ hu.MAP+ │
|
| 35 |
+
│ DAVIS Shi&Du STRING │
|
| 36 |
+
└────────────────────────────────────────────────────────────┘
|
| 37 |
+
│ │
|
| 38 |
+
┌──────┴──────┐ ┌─────┴──────┐
|
| 39 |
+
│ ML Benchmark │ │LLM Benchmark│
|
| 40 |
+
│ 3 models × │ │ 5 models × │
|
| 41 |
+
│ 5 splits × │ │ 4 levels × │
|
| 42 |
+
│ 2 neg types │ │ 4 configs │
|
| 43 |
+
└─────────────┘ └────────────┘
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
## Key Technical Decisions
|
| 47 |
+
|
| 48 |
+
| Decision | Choice | Rationale |
|
| 49 |
+
|----------|--------|-----------|
|
| 50 |
+
| License | CC BY-SA 4.0 | Compatible with ChEMBL CC BY-SA 3.0 (viral clause) |
|
| 51 |
+
| Storage | SQLite per domain | Portable, zero-infrastructure, reproducible |
|
| 52 |
+
| Export | Parquet with split columns | Standard ML format; lazy-loading friendly |
|
| 53 |
+
| ML metrics | LogAUC + 6 others | LogAUC[0.001,0.1] measures early enrichment, not just AUROC |
|
| 54 |
+
| LLM evaluation | 4 levels (L1–L4) | Progressive difficulty: MCQ → extraction → reasoning → discrimination |
|
| 55 |
+
|
| 56 |
+
---
|
| 57 |
+
|
| 58 |
+
## Domain Status Summary (as of 2026-03-30)
|
| 59 |
+
|
| 60 |
+
| Domain | DB Size | Negatives | ML Runs | LLM Runs | Status |
|
| 61 |
+
|--------|---------|-----------|---------|----------|--------|
|
| 62 |
+
| **DTI** | ~21 GB | 30,459,583 | 24/24 ✅ | 81/81 ✅ | Complete |
|
| 63 |
+
| **CT** | ~500 MB | 132,925 | 108/108 ✅ | 80/80 ✅ | Complete |
|
| 64 |
+
| **PPI** | 849 MB | 2,229,670 | 54/54 ✅ | 80/80 ✅ | Complete |
|
| 65 |
+
| **GE** | ~16 GB | 28,759,256 | Seed 42 ✅ | 4/5 models ✅ | Near complete |
|
| 66 |
+
|
| 67 |
+
---
|
| 68 |
+
|
| 69 |
+
## DTI Domain (Drug-Target Interaction)
|
| 70 |
+
|
| 71 |
+
Four sources: ChEMBL v36, PubChem BioAssay, BindingDB, DAVIS
|
| 72 |
+
|
| 73 |
+
### Database
|
| 74 |
+
- **30,459,583** negative results
|
| 75 |
+
- Source tiers: gold 818,611 / silver 198 / bronze 28,845,632
|
| 76 |
+
- 5 split strategies: random / cold_compound / cold_target / scaffold / temporal
|
| 77 |
+
|
| 78 |
+
### Key Results
|
| 79 |
+
- **ML:** Degree-matched negatives inflate LogAUC by +0.112 on average. Cold-target splits catastrophic (LogAUC 0.15–0.33) while AUROC stays deceptively high (0.76–0.89).
|
| 80 |
+
- **LLM L4:** All models near-random (MCC ≤ 0.18). DTI binding decisions are too nuanced for LLMs without domain context.
|
| 81 |
+
- **LLM L1:** Gemini achieves perfect accuracy (1.000) on 3-shot MCQ — artifact of format recognition.
|
| 82 |
+
|
| 83 |
+
---
|
| 84 |
+
|
| 85 |
+
## CT Domain (Clinical Trial Failure)
|
| 86 |
+
|
| 87 |
+
Four sources: AACT (ClinicalTrials.gov), CTO, Open Targets, Shi & Du 2024
|
| 88 |
+
|
| 89 |
+
### Database
|
| 90 |
+
- **132,925** failure results from 216,987 trials
|
| 91 |
+
- Tiers: gold 23,570 / silver 28,505 / bronze 60,223 / copper 20,627
|
| 92 |
+
- 7 failure categories: safety > efficacy > enrollment > strategic > regulatory > design > other
|
| 93 |
+
- Drug resolution: 4-step pipeline (ChEMBL exact → PubChem API → fuzzy JaroWinkler → manual CSV)
|
| 94 |
+
|
| 95 |
+
### Benchmark Design
|
| 96 |
+
- **ML:** CT-M1 binary failure prediction; CT-M2 7-way failure category (most challenging)
|
| 97 |
+
- **LLM:** L1 5-way MCQ (1,500 items), L2 failure report extraction (500), L3 reasoning (200), L4 discrimination (500)
|
| 98 |
+
|
| 99 |
+
### Key Results
|
| 100 |
+
- **CT-M1:** NegBioDB negatives trivially separable (AUROC=1.0). Control negatives reveal real difficulty (0.76–0.84).
|
| 101 |
+
- **CT-M2:** XGBoost best (macro-F1=0.51). Scaffold/temporal splits hardest (0.19).
|
| 102 |
+
- **LLM L4:** Gemini MCC=0.56 — highest across all domains. Meaningful discrimination possible for trial failure.
|
| 103 |
+
- **LLM L3:** Ceiling effect — GPT-4o-mini judge too lenient (4.4–5.0/5.0).
|
| 104 |
+
|
| 105 |
+
---
|
| 106 |
+
|
| 107 |
+
## PPI Domain (Protein-Protein Interaction)
|
| 108 |
+
|
| 109 |
+
Four sources: IntAct, HuRI, hu.MAP 3.0, STRING v12.0
|
| 110 |
+
|
| 111 |
+
### Database
|
| 112 |
+
- **2,229,670** negative results; 61,728 positive pairs (HuRI Y2H)
|
| 113 |
+
- 18,412 proteins; 4 split strategies: random / cold_protein / cold_both / degree_balanced
|
| 114 |
+
|
| 115 |
+
### Key Results
|
| 116 |
+
- **ML:** MLPFeatures (hand-crafted) dominates cold splits (AUROC 0.95 cold_both); PIPR collapses to 0.41 (below random).
|
| 117 |
+
- **LLM L1:** 3-shot near-perfect (0.997–1.000) is an artifact of example format leakage.
|
| 118 |
+
- **LLM L3:** zero-shot >> 3-shot (4.3–4.7 vs 3.1–3.7); gold reasoning examples degrade structural reasoning.
|
| 119 |
+
- **LLM L4:** MCC 0.33–0.44 with confirmed temporal contamination (pre-2015 acc ~0.6–0.8, post-2020 acc ~0.07–0.25).
|
| 120 |
+
|
| 121 |
+
---
|
| 122 |
+
|
| 123 |
+
## GE Domain (Gene Essentiality / DepMap)
|
| 124 |
+
|
| 125 |
+
Two sources: DepMap CRISPR (Chronos scores) and RNAi (DEMETER2)
|
| 126 |
+
|
| 127 |
+
### Database
|
| 128 |
+
- **28,759,256** negative results (genes with no essentiality signal)
|
| 129 |
+
- Final tiers: Gold 753,878 / Silver 18,608,686 / Bronze 9,396,692
|
| 130 |
+
- 19,554 genes × 2,132 cell lines; 22,549,910 aggregated pairs
|
| 131 |
+
- 5 split strategies: random / cold_gene / cold_cell_line / cold_both / degree_balanced
|
| 132 |
+
|
| 133 |
+
### Benchmark Design
|
| 134 |
+
- **ML:** XGBoost and MLPFeatures on gene expression + lineage features (gene-cell pair prediction)
|
| 135 |
+
- **LLM:** L1 4-way essentiality MCQ (1,200 items), L2 essentiality data extraction (500), L3 reasoning (200), L4 discrimination (475)
|
| 136 |
+
|
| 137 |
+
### Key Results (partial — Llama pending)
|
| 138 |
+
- **LLM L3:** zero-shot >> 3-shot (overall mean 4.5 vs 2.5) — same pattern as PPI.
|
| 139 |
+
- **LLM L4:** Expected intermediate MCC (DepMap is widely studied; likely contamination present).
|
| 140 |
+
- **ML:** Seed 42 complete; final aggregated results pending seeds 43/44.
|
| 141 |
+
|
| 142 |
+
---
|
| 143 |
+
|
| 144 |
+
## Dual Benchmark Framework
|
| 145 |
+
|
| 146 |
+
### LLM Benchmark Levels
|
| 147 |
+
|
| 148 |
+
| Level | Task | Difficulty | Automation |
|
| 149 |
+
|-------|------|-----------|------------|
|
| 150 |
+
| L1 | Multiple-choice classification | Easy | Fully automated (exact match) |
|
| 151 |
+
| L2 | Structured field extraction | Medium | Automated (JSON schema check + field F1) |
|
| 152 |
+
| L3 | Free-text reasoning quality | Hard | LLM-as-judge (Gemini 2.5-Flash, 4 rubric dimensions) |
|
| 153 |
+
| L4 | Real vs synthetic discrimination | Hard | Automated (MCC on binary decision) |
|
| 154 |
+
|
| 155 |
+
### LLM Models Evaluated
|
| 156 |
+
|
| 157 |
+
| Model | Provider | Type |
|
| 158 |
+
|-------|----------|------|
|
| 159 |
+
| Claude Haiku 4.5 | Anthropic API | Small API model |
|
| 160 |
+
| Gemini 2.5-Flash | Google API | Small API model |
|
| 161 |
+
| GPT-4o-mini | OpenAI API | Small API model |
|
| 162 |
+
| Qwen2.5-7B-Instruct | HuggingFace / vLLM | Open-weight local |
|
| 163 |
+
| Llama-3.1-8B-Instruct | HuggingFace / vLLM | Open-weight local |
|
| 164 |
+
|
| 165 |
+
### Cross-Domain LLM L4 Summary
|
| 166 |
+
|
| 167 |
+
```
|
| 168 |
+
DTI (≤0.18) < PPI (0.33–0.44) < CT (0.48–0.56)
|
| 169 |
+
↑
|
| 170 |
+
Increasing task complexity
|
| 171 |
+
and LLM accessible signal
|
| 172 |
+
```
|
| 173 |
+
|
| 174 |
+
---
|
| 175 |
+
|
| 176 |
+
## Timeline
|
| 177 |
+
|
| 178 |
+
| Milestone | Date |
|
| 179 |
+
|-----------|------|
|
| 180 |
+
| Project initiated | 2026-03-02 |
|
| 181 |
+
| DTI domain complete (ML + LLM) | 2026-03-13 |
|
| 182 |
+
| CT domain initiated | 2026-03-17 |
|
| 183 |
+
| CT domain complete (ML + LLM) | 2026-03-20 |
|
| 184 |
+
| PPI domain complete (ML + LLM) | 2026-03-23 |
|
| 185 |
+
| GE domain ETL + ML export | 2026-03-23 |
|
| 186 |
+
| GE LLM (4/5 models) | 2026-03-24 |
|
| 187 |
+
| Public release (GitHub + HuggingFace) | 2026-03-30 |
|
README.md
ADDED
|
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NegBioDB
|
| 2 |
+
|
| 3 |
+
**Negative Results Database & Dual ML/LLM Benchmark for Biomedical Sciences**
|
| 4 |
+
|
| 5 |
+
[](https://creativecommons.org/licenses/by-sa/4.0/)
|
| 6 |
+
[](https://www.python.org/downloads/)
|
| 7 |
+
|
| 8 |
+
Approximately 90% of scientific experiments produce null or inconclusive results, yet the vast majority remain unpublished. NegBioDB systematically collects experimentally confirmed negative results across four biomedical domains and provides dual-track ML + LLM benchmarks to quantify the impact of this publication bias on AI models.
|
| 9 |
+
|
| 10 |
+
## Key Features
|
| 11 |
+
|
| 12 |
+
- **Four domains**: Drug-Target Interaction (DTI), Clinical Trial Failure (CT), Protein-Protein Interaction (PPI), Gene Essentiality (GE/DepMap)
|
| 13 |
+
- **~61.6M negative results** across 4 SQLite databases (30.5M DTI + 133K CT + 2.2M PPI + 28.8M GE)
|
| 14 |
+
- **Dual benchmark**: ML track (traditional prediction) + LLM track (biomedical NLP tasks)
|
| 15 |
+
- **242 ML runs** + **321 LLM runs** completed across all domains
|
| 16 |
+
- **Multiple split strategies**: random, cold-entity, temporal, scaffold, degree-balanced
|
| 17 |
+
- **Reproducible pipeline**: SQLite databases, config-driven ETL, SLURM/HPC support
|
| 18 |
+
- **Standardized evaluation**: 7 ML metrics including LogAUC, BEDROC, EF, AUROC, AUPRC, and MCC + LLM rubrics
|
| 19 |
+
|
| 20 |
+
## Database Statistics
|
| 21 |
+
|
| 22 |
+
| Domain | Negative Results | Key Entities | Sources | DB Size |
|
| 23 |
+
|--------|-----------------|--------------|---------|---------|
|
| 24 |
+
| **DTI** | 30,459,583 | 919K compounds, 3.7K targets | ChEMBL, PubChem, BindingDB, DAVIS | ~21 GB |
|
| 25 |
+
| **CT** | 132,925 | 177K interventions, 56K conditions | AACT, CTO, Open Targets, Shi & Du | ~500 MB |
|
| 26 |
+
| **PPI** | 2,229,670 | 18.4K proteins | IntAct, HuRI, hu.MAP, STRING | 849 MB |
|
| 27 |
+
| **GE** | 28,759,256 | 19,554 genes, 2,132 cell lines | DepMap (CRISPR, RNAi) | ~16 GB |
|
| 28 |
+
| **Total** | **~61.6M** | | **14 sources** | **~38 GB** |
|
| 29 |
+
|
| 30 |
+
*PPI DB total: 2,229,670; export rows after split filtering: 2,220,786.*
|
| 31 |
+
|
| 32 |
+
## Project Status
|
| 33 |
+
|
| 34 |
+
| Domain | ETL | ML Benchmark | LLM Benchmark | Status |
|
| 35 |
+
|--------|-----|-------------|---------------|--------|
|
| 36 |
+
| DTI | 4 sources | 24/24 runs | 81/81 runs | Complete |
|
| 37 |
+
| CT | 4 sources | 108/108 runs | 80/80 runs | Complete |
|
| 38 |
+
| PPI | 4 sources | 54/54 runs | 80/80 runs | Complete |
|
| 39 |
+
| GE | 2 sources | 14/14 runs (seed 42) | 64/80 runs* | Seed 42 ML complete, LLM 4/5 models |
|
| 40 |
+
|
| 41 |
+
*Llama 3.1-8B results pending HPC GPU availability; seeds 43/44 in progress.
|
| 42 |
+
|
| 43 |
+
---
|
| 44 |
+
|
| 45 |
+
## Key Findings
|
| 46 |
+
|
| 47 |
+
### ML: Negative Source Matters
|
| 48 |
+
|
| 49 |
+
**DTI** — Degree-matched negatives inflate LogAUC by +0.112 on average. Cold-target splits cause catastrophic failure (LogAUC 0.15–0.33), while AUROC misleadingly stays 0.76–0.89.
|
| 50 |
+
|
| 51 |
+
| DTI Model | Random (NegBioDB) | Random (Degree-Matched) | Cold-Target |
|
| 52 |
+
|-----------|------------------|------------------------|-------------|
|
| 53 |
+
| DeepDTA | 0.833 | **0.919** | 0.325 |
|
| 54 |
+
| GraphDTA | 0.843 | **0.967** | 0.241 |
|
| 55 |
+
| DrugBAN | 0.830 | **0.955** | 0.151 |
|
| 56 |
+
|
| 57 |
+
**PPI** — PIPR cold_both AUROC drops to 0.409 (below random); MLPFeatures remains robust at 0.950 due to hand-crafted features.
|
| 58 |
+
|
| 59 |
+
**CT** — NegBioDB negatives are trivially separable (AUROC ~1.0); M2 7-way classification is challenging (best macro-F1 = 0.51).
|
| 60 |
+
|
| 61 |
+
**GE** — Cold-gene splits reveal severe generalization gaps; degree-balanced negatives modestly improve ranking metrics over random negatives.
|
| 62 |
+
|
| 63 |
+
### LLM: L4 Discrimination Reveals Domain Differences
|
| 64 |
+
|
| 65 |
+
| Domain | L4 MCC Range | Interpretation | Contamination |
|
| 66 |
+
|--------|-------------|----------------|---------------|
|
| 67 |
+
| DTI | ≤ 0.18 | Near random | Not detected |
|
| 68 |
+
| PPI | 0.33–0.44 | Moderate | **Yes** (temporal gap) |
|
| 69 |
+
| CT | 0.48–0.56 | Meaningful | Not detected |
|
| 70 |
+
| GE | Pending full run | — | — |
|
| 71 |
+
|
| 72 |
+
PPI L4 reveals **temporal contamination**: pre-2015 interaction data is identified at 59–79% accuracy, while post-2020 data drops to 7–25%. LLMs rely on memorized training data, not biological reasoning.
|
| 73 |
+
|
| 74 |
+
---
|
| 75 |
+
|
| 76 |
+
## Setup
|
| 77 |
+
|
| 78 |
+
Requires Python 3.11+ and [uv](https://docs.astral.sh/uv/).
|
| 79 |
+
|
| 80 |
+
```bash
|
| 81 |
+
git clone https://github.com/jang1563/NegBioDB.git
|
| 82 |
+
cd NegBioDB
|
| 83 |
+
make setup # Create venv and install dependencies
|
| 84 |
+
make db # Initialize SQLite database
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
## Data Pipeline
|
| 88 |
+
|
| 89 |
+
### DTI Domain
|
| 90 |
+
|
| 91 |
+
```bash
|
| 92 |
+
make download # Download all 4 sources (ChEMBL, PubChem, BindingDB, DAVIS)
|
| 93 |
+
make load-all # Run all ETL loaders
|
| 94 |
+
uv run python scripts/export_ml_dataset.py # Export ML datasets
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
### CT Domain
|
| 98 |
+
|
| 99 |
+
```bash
|
| 100 |
+
# Download sources (AACT URL changes monthly)
|
| 101 |
+
uv run python scripts_ct/download_aact.py --url <AACT_URL>
|
| 102 |
+
uv run python scripts_ct/download_cto.py
|
| 103 |
+
uv run python scripts_ct/download_opentargets.py
|
| 104 |
+
uv run python scripts_ct/download_shi_du.py
|
| 105 |
+
|
| 106 |
+
# Load and process
|
| 107 |
+
uv run python scripts_ct/load_aact.py
|
| 108 |
+
uv run python scripts_ct/classify_failures.py
|
| 109 |
+
uv run python scripts_ct/resolve_drugs.py
|
| 110 |
+
uv run python scripts_ct/load_outcomes.py
|
| 111 |
+
uv run python scripts_ct/export_ct_ml_dataset.py
|
| 112 |
+
```
|
| 113 |
+
|
| 114 |
+
### PPI Domain
|
| 115 |
+
|
| 116 |
+
```bash
|
| 117 |
+
# Download sources
|
| 118 |
+
uv run python scripts_ppi/download_intact.py
|
| 119 |
+
uv run python scripts_ppi/download_huri.py
|
| 120 |
+
uv run python scripts_ppi/download_humap.py
|
| 121 |
+
uv run python scripts_ppi/download_string.py
|
| 122 |
+
|
| 123 |
+
# Load and process
|
| 124 |
+
uv run python scripts_ppi/load_intact.py
|
| 125 |
+
uv run python scripts_ppi/load_huri.py
|
| 126 |
+
uv run python scripts_ppi/load_humap.py
|
| 127 |
+
uv run python scripts_ppi/load_string.py
|
| 128 |
+
uv run python scripts_ppi/fetch_sequences.py
|
| 129 |
+
uv run python scripts_ppi/export_ppi_ml_dataset.py
|
| 130 |
+
```
|
| 131 |
+
|
| 132 |
+
### GE Domain (DepMap)
|
| 133 |
+
|
| 134 |
+
```bash
|
| 135 |
+
# Download DepMap CRISPR and RNAi screens
|
| 136 |
+
uv run python scripts_depmap/download_depmap.py
|
| 137 |
+
|
| 138 |
+
# Load and process
|
| 139 |
+
uv run python scripts_depmap/load_depmap.py
|
| 140 |
+
uv run python scripts_depmap/load_rnai.py
|
| 141 |
+
uv run python scripts_depmap/fetch_gene_descriptions.py
|
| 142 |
+
uv run python scripts_depmap/export_ge_ml_dataset.py
|
| 143 |
+
```
|
| 144 |
+
|
| 145 |
+
## ML Experiments
|
| 146 |
+
|
| 147 |
+
```bash
|
| 148 |
+
# DTI training (local or SLURM)
|
| 149 |
+
uv run python scripts/train_baseline.py --model deepdta --split random --negative negbiodb --dataset balanced
|
| 150 |
+
bash slurm/submit_all.sh
|
| 151 |
+
|
| 152 |
+
# CT training
|
| 153 |
+
uv run python scripts_ct/train_ct_baseline.py --model xgboost --task m1 --split random --negative negbiodb
|
| 154 |
+
bash slurm/submit_ct_all.sh
|
| 155 |
+
|
| 156 |
+
# PPI training
|
| 157 |
+
uv run python scripts_ppi/train_baseline.py --model siamese_cnn --split random --negative negbiodb --dataset balanced
|
| 158 |
+
bash slurm/submit_ppi_all.sh
|
| 159 |
+
|
| 160 |
+
# GE training
|
| 161 |
+
uv run python scripts_depmap/train_ge_baseline.py --model xgboost --split random --negative negbiodb
|
| 162 |
+
bash slurm/submit_ge_ml_all.sh
|
| 163 |
+
|
| 164 |
+
# Results collection (all domains support --aggregate-seeds)
|
| 165 |
+
uv run python scripts/collect_results.py --dataset balanced --aggregate-seeds
|
| 166 |
+
uv run python scripts_ct/collect_ct_results.py --aggregate-seeds
|
| 167 |
+
uv run python scripts_ppi/collect_results.py --dataset balanced --aggregate-seeds
|
| 168 |
+
uv run python scripts_depmap/collect_ge_results.py --aggregate-seeds
|
| 169 |
+
```
|
| 170 |
+
|
| 171 |
+
## LLM Benchmark
|
| 172 |
+
|
| 173 |
+
```bash
|
| 174 |
+
# Build LLM datasets (example: DTI)
|
| 175 |
+
uv run python scripts/build_l1_dataset.py
|
| 176 |
+
uv run python scripts/build_l2_dataset.py
|
| 177 |
+
uv run python scripts/build_l3_dataset.py
|
| 178 |
+
uv run python scripts/build_l4_dataset.py
|
| 179 |
+
|
| 180 |
+
# Run LLM inference
|
| 181 |
+
uv run python scripts/run_llm_benchmark.py --model gemini --level l1 --config zeroshot
|
| 182 |
+
|
| 183 |
+
# GE-specific LLM datasets and inference
|
| 184 |
+
uv run python scripts_depmap/build_ge_l1_dataset.py
|
| 185 |
+
uv run python scripts_depmap/run_ge_llm_benchmark.py --model gemini --level l1 --config zeroshot
|
| 186 |
+
|
| 187 |
+
# Collect results
|
| 188 |
+
uv run python scripts/collect_llm_results.py
|
| 189 |
+
uv run python scripts_ct/collect_ct_llm_results.py
|
| 190 |
+
uv run python scripts_ppi/collect_ppi_llm_results.py
|
| 191 |
+
uv run python scripts_depmap/collect_ge_results.py --llm
|
| 192 |
+
```
|
| 193 |
+
|
| 194 |
+
## Testing
|
| 195 |
+
|
| 196 |
+
```bash
|
| 197 |
+
# All tests (~1,000 total across 4 domains)
|
| 198 |
+
PYTHONPATH=src uv run pytest tests/ -v
|
| 199 |
+
|
| 200 |
+
# By domain
|
| 201 |
+
PYTHONPATH=src uv run pytest tests/test_db.py tests/test_etl_*.py tests/test_export.py -v # DTI
|
| 202 |
+
PYTHONPATH=src uv run pytest tests/test_ct_*.py tests/test_etl_aact.py -v # CT
|
| 203 |
+
PYTHONPATH=src uv run pytest tests/test_ppi_*.py tests/test_etl_intact.py -v # PPI
|
| 204 |
+
PYTHONPATH=src uv run pytest tests/test_ge_*.py tests/test_etl_depmap.py -v # GE
|
| 205 |
+
|
| 206 |
+
# Skip network-dependent tests
|
| 207 |
+
PYTHONPATH=src uv run pytest tests/ -v -m "not integration"
|
| 208 |
+
```
|
| 209 |
+
|
| 210 |
+
## Project Structure
|
| 211 |
+
|
| 212 |
+
```
|
| 213 |
+
NegBioDB/
|
| 214 |
+
├── src/
|
| 215 |
+
│ ├── negbiodb/ # DTI core library
|
| 216 |
+
│ │ ├── db.py # Database creation & migrations
|
| 217 |
+
│ │ ├── download.py # Download utilities (resume, checksum)
|
| 218 |
+
│ │ ├── standardize.py # Compound/target standardization (RDKit)
|
| 219 |
+
│ │ ├── etl_davis.py # DAVIS ETL pipeline
|
| 220 |
+
│ │ ├── etl_chembl.py # ChEMBL ETL pipeline
|
| 221 |
+
│ │ ├── etl_pubchem.py # PubChem ETL (streaming, 29M rows)
|
| 222 |
+
│ │ ├── etl_bindingdb.py # BindingDB ETL pipeline
|
| 223 |
+
│ │ ├── export.py # ML dataset export (Parquet, 5 splits)
|
| 224 |
+
│ │ ├── metrics.py # ML evaluation metrics (7 metrics)
|
| 225 |
+
│ │ ├── llm_client.py # LLM API client (vLLM, Gemini, OpenAI, Anthropic)
|
| 226 |
+
│ │ ├── llm_prompts.py # LLM prompt templates (L1-L4)
|
| 227 |
+
│ │ ├── llm_eval.py # LLM evaluation functions
|
| 228 |
+
│ │ └── models/ # ML baseline models
|
| 229 |
+
│ │ ├── deepdta.py # DeepDTA (sequence CNN)
|
| 230 |
+
│ │ ├── graphdta.py # GraphDTA (graph neural network)
|
| 231 |
+
│ │ └── drugban.py # DrugBAN (bilinear attention)
|
| 232 |
+
│ ├── negbiodb_ct/ # Clinical Trial domain
|
| 233 |
+
│ │ ├── ct_db.py # CT database & migrations
|
| 234 |
+
│ │ ├── etl_aact.py # AACT ETL (13 tables)
|
| 235 |
+
│ │ ├── etl_classify.py # 3-tier failure classification
|
| 236 |
+
│ │ ├── drug_resolver.py # 4-step drug name resolution
|
| 237 |
+
│ │ ├── etl_outcomes.py # Outcome enrichment (p-values, SAE)
|
| 238 |
+
│ │ ├── ct_export.py # ML export (M1/M2, 6 splits)
|
| 239 |
+
│ │ ├── ct_features.py # Feature encoding (1044/1066-dim)
|
| 240 |
+
│ │ ├── ct_models.py # CT_MLP, CT_GNN_Tab models
|
| 241 |
+
│ │ ├── llm_prompts.py # CT LLM prompts (L1-L4)
|
| 242 |
+
│ │ ├── llm_eval.py # CT LLM evaluation
|
| 243 |
+
│ │ └── llm_dataset.py # CT LLM dataset construction
|
| 244 |
+
│ ├── negbiodb_ppi/ # PPI domain
|
| 245 |
+
│ │ ├── ppi_db.py # PPI database & migrations
|
| 246 |
+
│ │ ├── etl_intact.py # IntAct PSI-MI TAB 2.7
|
| 247 |
+
│ │ ├── etl_huri.py # HuRI Y2H screen negatives
|
| 248 |
+
│ │ ├── etl_humap.py # hu.MAP ML-derived negatives
|
| 249 |
+
│ │ ├── etl_string.py # STRING zero-score pairs
|
| 250 |
+
│ │ ├── protein_mapper.py # UniProt validation, ENSG mapping
|
| 251 |
+
│ │ ├── export.py # ML export (4 splits, controls)
|
| 252 |
+
│ │ ├── llm_prompts.py # PPI LLM prompts (L1-L4)
|
| 253 |
+
│ │ ├── llm_eval.py # PPI LLM evaluation
|
| 254 |
+
│ │ ├── llm_dataset.py # PPI LLM dataset construction
|
| 255 |
+
│ │ └── models/ # PPI ML models
|
| 256 |
+
│ │ ├── siamese_cnn.py # Shared CNN encoder
|
| 257 |
+
│ │ ├── pipr.py # Cross-attention PPI model
|
| 258 |
+
│ │ └── mlp_features.py # Hand-crafted feature MLP
|
| 259 |
+
│ └── negbiodb_depmap/ # Gene Essentiality (DepMap) domain
|
| 260 |
+
│ ├── depmap_db.py # GE database & migrations
|
| 261 |
+
│ ├── etl_depmap.py # DepMap CRISPR ETL
|
| 262 |
+
│ ├── etl_rnai.py # RNAi screen ETL
|
| 263 |
+
│ ├── etl_prism.py # PRISM drug screen ETL (optional)
|
| 264 |
+
│ ├── export.py # ML export (5 splits, 770 MB parquet)
|
| 265 |
+
│ ├── ge_features.py # Gene/cell-line feature encoding
|
| 266 |
+
│ ├── llm_prompts.py # GE LLM prompts (L1-L4)
|
| 267 |
+
│ ├── llm_eval.py # GE LLM evaluation
|
| 268 |
+
│ └── llm_dataset.py # GE LLM dataset construction
|
| 269 |
+
├── scripts/ # DTI CLI entry points
|
| 270 |
+
├── scripts_ct/ # CT CLI entry points
|
| 271 |
+
├── scripts_ppi/ # PPI CLI entry points
|
| 272 |
+
├── scripts_depmap/ # GE CLI entry points
|
| 273 |
+
├── slurm/ # SLURM job scripts (HPC-ready, path-agnostic)
|
| 274 |
+
├── migrations/ # DTI SQL schema migrations
|
| 275 |
+
├── migrations_ct/ # CT SQL schema migrations
|
| 276 |
+
├── migrations_ppi/ # PPI SQL schema migrations
|
| 277 |
+
├── migrations_depmap/ # GE SQL schema migrations
|
| 278 |
+
├── tests/ # Test suite (~1,000 tests across 4 domains)
|
| 279 |
+
├── docs/ # Methodology notes and prompt appendices
|
| 280 |
+
├── paper/ # LaTeX source (NeurIPS 2026 submission)
|
| 281 |
+
├── data/ # SQLite databases (not in repo, ~38 GB)
|
| 282 |
+
├── exports/ # ML/LLM export files (Parquet, not in repo)
|
| 283 |
+
├── results/ # Experiment results (not in repo)
|
| 284 |
+
├── config.yaml # Pipeline configuration
|
| 285 |
+
├── Makefile # Build/pipeline commands
|
| 286 |
+
├── pyproject.toml # Python project metadata
|
| 287 |
+
├── experiment_results.md # ML/LLM result tables (all 4 domains)
|
| 288 |
+
├── PROJECT_OVERVIEW.md # Detailed project overview
|
| 289 |
+
└── ROADMAP.md # Execution roadmap
|
| 290 |
+
```
|
| 291 |
+
|
| 292 |
+
## Exported Datasets
|
| 293 |
+
|
| 294 |
+
### DTI (`exports/`)
|
| 295 |
+
|
| 296 |
+
| File | Description |
|
| 297 |
+
|------|-------------|
|
| 298 |
+
| `negbiodb_dti_pairs.parquet` | 1.7M compound-target pairs with 5 split columns |
|
| 299 |
+
| `negbiodb_m1_balanced.parquet` | M1: 1.73M rows (1:1 active:inactive) |
|
| 300 |
+
| `negbiodb_m1_realistic.parquet` | M1: 9.49M rows (1:10 ratio) |
|
| 301 |
+
| `negbiodb_m1_balanced_ddb.parquet` | Exp 4: degree-balanced split |
|
| 302 |
+
| `negbiodb_m1_uniform_random.parquet` | Exp 1: uniform random negatives |
|
| 303 |
+
| `negbiodb_m1_degree_matched.parquet` | Exp 1: degree-matched negatives |
|
| 304 |
+
| `chembl_positives_pchembl6.parquet` | 863K ChEMBL actives (pChEMBL >= 6) |
|
| 305 |
+
| `compound_names.parquet` | 144K compound names (for LLM tasks) |
|
| 306 |
+
|
| 307 |
+
### CT (`exports/ct/`)
|
| 308 |
+
|
| 309 |
+
| File | Description |
|
| 310 |
+
|------|-------------|
|
| 311 |
+
| `negbiodb_ct_pairs.parquet` | 102,850 failure pairs, 6 splits, all tiers |
|
| 312 |
+
| `negbiodb_ct_m1_balanced.parquet` | Binary: 11,222 rows (5,611 pos + 5,611 neg) |
|
| 313 |
+
| `negbiodb_ct_m1_realistic.parquet` | Binary: 36,957 rows (1:~6 ratio) |
|
| 314 |
+
| `negbiodb_ct_m1_smiles_only.parquet` | Binary: 3,878 rows (SMILES-resolved only) |
|
| 315 |
+
| `negbiodb_ct_m2.parquet` | 7-way category: 112,298 rows (non-copper) |
|
| 316 |
+
|
| 317 |
+
### PPI (`exports/ppi/`)
|
| 318 |
+
|
| 319 |
+
| File | Description |
|
| 320 |
+
|------|-------------|
|
| 321 |
+
| `negbiodb_ppi_pairs.parquet` | 2,220,786 negative pairs with split columns |
|
| 322 |
+
| `ppi_m1_balanced.parquet` | M1: 123,456 rows (1:1 pos:neg) |
|
| 323 |
+
| `ppi_m1_realistic.parquet` | M1: 679,008 rows (1:10 ratio) |
|
| 324 |
+
| `ppi_m1_balanced_ddb.parquet` | Exp 4: degree-balanced split |
|
| 325 |
+
| `ppi_m1_uniform_random.parquet` | Exp 1: uniform random negatives |
|
| 326 |
+
| `ppi_m1_degree_matched.parquet` | Exp 1: degree-matched negatives |
|
| 327 |
+
|
| 328 |
+
### GE (`exports/ge/`)
|
| 329 |
+
|
| 330 |
+
| File | Description |
|
| 331 |
+
|------|-------------|
|
| 332 |
+
| `negbiodb_ge_pairs.parquet` | 770 MB; 22.5M gene-cell-line pairs with 5 split columns |
|
| 333 |
+
| `ge_m1_random.parquet` | Random split (train/val/test) |
|
| 334 |
+
| `ge_m1_cold_gene.parquet` | Cold-gene generalization split |
|
| 335 |
+
| `ge_m1_cold_cell_line.parquet` | Cold-cell-line generalization split |
|
| 336 |
+
| `ge_m1_cold_both.parquet` | Cold-both (hardest) split |
|
| 337 |
+
| `ge_m1_degree_balanced.parquet` | Degree-balanced negative control |
|
| 338 |
+
|
| 339 |
+
## Data Sources
|
| 340 |
+
|
| 341 |
+
### DTI
|
| 342 |
+
|
| 343 |
+
| Source | Records | License |
|
| 344 |
+
|--------|---------|---------|
|
| 345 |
+
| [ChEMBL v36](https://www.ebi.ac.uk/chembl/) | 371K | CC BY-SA 3.0 |
|
| 346 |
+
| [PubChem BioAssay](https://pubchem.ncbi.nlm.nih.gov/) | 29.6M | Public Domain |
|
| 347 |
+
| [BindingDB](https://www.bindingdb.org/) | 404K | CC BY |
|
| 348 |
+
| [DAVIS](https://github.com/dingyan20/Davis-Dataset-for-DTA-Prediction) | 20K | Public |
|
| 349 |
+
|
| 350 |
+
### CT
|
| 351 |
+
|
| 352 |
+
| Source | Records | License |
|
| 353 |
+
|--------|---------|---------|
|
| 354 |
+
| [AACT (ClinicalTrials.gov)](https://aact.ctti-clinicaltrials.org/) | 216,987 trials | Public Domain |
|
| 355 |
+
| [CTO](https://github.com/fairnessforensics/CTO) | 20,627 | MIT |
|
| 356 |
+
| [Open Targets](https://www.opentargets.org/) | 32,782 targets | Apache 2.0 |
|
| 357 |
+
| [Shi & Du 2024](https://doi.org/10.1038/s41597-024-03399-2) | 119K + 803K rows | CC BY 4.0 |
|
| 358 |
+
|
| 359 |
+
### PPI
|
| 360 |
+
|
| 361 |
+
| Source | Records | License |
|
| 362 |
+
|--------|---------|---------|
|
| 363 |
+
| [IntAct](https://www.ebi.ac.uk/intact/) | 779 pairs | CC BY 4.0 |
|
| 364 |
+
| [HuRI](http://www.interactome-atlas.org/) | 500,000 pairs | CC BY 4.0 |
|
| 365 |
+
| [hu.MAP 3.0](https://humap3.proteincomplexes.org/) | 1,228,891 pairs | MIT |
|
| 366 |
+
| [STRING v12.0](https://string-db.org/) | 500,000 pairs | CC BY 4.0 |
|
| 367 |
+
|
| 368 |
+
### GE
|
| 369 |
+
|
| 370 |
+
| Source | Records | License |
|
| 371 |
+
|--------|---------|---------|
|
| 372 |
+
| [DepMap CRISPR (Chronos)](https://depmap.org/) | 28.7M gene-cell pairs | CC BY 4.0 |
|
| 373 |
+
| [DepMap RNAi (DEMETER2)](https://depmap.org/) | Integrated | CC BY 4.0 |
|
| 374 |
+
|
| 375 |
+
## ML Evaluation Metrics
|
| 376 |
+
|
| 377 |
+
| Metric | Role |
|
| 378 |
+
|--------|------|
|
| 379 |
+
| **LogAUC[0.001,0.1]** | Primary ranking metric (early enrichment) |
|
| 380 |
+
| **BEDROC (alpha=20)** | Early enrichment (exponentially weighted) |
|
| 381 |
+
| **EF@1%, EF@5%** | Enrichment factor at top 1%/5% |
|
| 382 |
+
| **AUPRC** | Secondary ranking metric |
|
| 383 |
+
| **MCC** | Balanced classification |
|
| 384 |
+
| **AUROC** | Backward compatibility |
|
| 385 |
+
|
| 386 |
+
## Citation
|
| 387 |
+
|
| 388 |
+
If you use NegBioDB in your research, please cite:
|
| 389 |
+
|
| 390 |
+
```bibtex
|
| 391 |
+
@misc{negbiodb2026,
|
| 392 |
+
title={NegBioDB: A Negative Results Database and Dual ML/LLM Benchmark for Biomedical Sciences},
|
| 393 |
+
author={Jang, James},
|
| 394 |
+
year={2026},
|
| 395 |
+
url={https://github.com/jang1563/NegBioDB}
|
| 396 |
+
}
|
| 397 |
+
```
|
| 398 |
+
|
| 399 |
+
## License
|
| 400 |
+
|
| 401 |
+
**CC BY-SA 4.0** — see [LICENSE](LICENSE) for details.
|
| 402 |
+
|
| 403 |
+
CC BY-SA 4.0 is required by the share-alike ("viral") clause of ChEMBL's CC BY-SA 3.0 license, which obligates derivative datasets to be distributed under a compatible share-alike license.
|
ROADMAP.md
ADDED
|
@@ -0,0 +1,561 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NegBioDB — Execution Roadmap
|
| 2 |
+
|
| 3 |
+
> Last updated: 2026-03-30 (v19 — DTI ✅ CT ✅ PPI ✅ GE near-complete: ML seed 42 done, LLM 4/5 models done)
|
| 4 |
+
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
## Critical Findings (Updated March 2026)
|
| 8 |
+
|
| 9 |
+
1. **HCDT 2.0 License: CC BY-NC-ND 4.0** — Cannot redistribute derivatives. Must independently recreate from underlying sources (BindingDB, ChEMBL, GtoPdb, PubChem, TTD). Use a 10 uM primary threshold (rather than HCDT's 100 uM) so our dataset is clearly differentiated from HCDT.
|
| 10 |
+
2. **InertDB License: CC BY-NC** — Cannot include in commercial track. Provide optional download script only.
|
| 11 |
+
3. **Submission requirements**: downloadable data, Croissant metadata, code available, Datasheet for Datasets.
|
| 12 |
+
4. **LIT-PCBA compromised** (2025 audit found data leakage) — Creates urgency for NegBioDB as replacement gold-standard.
|
| 13 |
+
5. **Recommended NegBioDB License: CC BY-SA 4.0** — Compatible with ChEMBL (CC BY-SA 3.0) via one-way upgrade.
|
| 14 |
+
6. **No direct competitor exists** as of March 2026.
|
| 15 |
+
7. **No LLM benchmark tests negative DTI tasks** — ChemBench, Mol-Instructions, MedQA, SciBench all lack negative result evaluation. NegBioBench LLM track is first-of-kind.
|
| 16 |
+
8. **LLM evaluation also free** — Gemini Flash free tier as LLM-as-Judge + ollama local models as baselines. Flagship models (GPT-4, Claude) added post-stabilization only.
|
| 17 |
+
9. **Data volume is NOT the bottleneck** — ChEMBL alone has ~527K quality inactive records (pChEMBL < 5, validated). PubChem has ~61M target-annotated confirmatory inactives. Estimated 200K+ unique compound-target pairs available. Minimum target raised to **10K curated entries** (from 5K).
|
| 18 |
+
10. **PubChem FTP bulk is far superior to API** — `bioactivities.tsv.gz` (3 GB) contains all 301M bioactivity rows. Processing: < 1 day. API approach would take weeks.
|
| 19 |
+
11. **LLM-as-Judge rate limit (250 RPD)** — Must-have tasks (L1, L2, L4) all use automated evaluation. Judge needed only for should-have L3 (1,530 calls = 6 days). All judge tasks with 3 models = 20 days. With 6 models = 39 days (NOT feasible for sprint).
|
| 20 |
+
12. **Paper narrative must be problem-first** — "Existing benchmarks are broken" (Exp 1 + Exp 4), not "Here's a database." The database is presented as the solution to that problem, not as the headline contribution in itself.
|
| 21 |
+
13. **Positive data protocol required** — NegBioDB is negative-only. For ML benchmarking (M1), positive data must be sourced from ChEMBL (pChEMBL ≥ 6). Report two class ratios: balanced (1:1) and realistic (1:10). See §Positive Data Protocol below.
|
| 22 |
+
14. **Random negative baseline must be precisely defined** — Exp 1 compares NegBioDB negatives against random negatives. Random = uniform sampling from untested compound-target pairs (TDC standard). See §Random Negative Control Design.
|
| 23 |
+
15. **Paper format: 9 pages** + unlimited appendix. Croissant is **mandatory** (desk rejection if missing/invalid).
|
| 24 |
+
16. **GPU strategy: Kaggle free tier** (30 hrs/week) is sufficient for 18 ML baseline runs (~36-72 GPU-hours over 4 weeks). Fallback: Colab Pro ($10/month).
|
| 25 |
+
17. **ChEMBL v36** (Sep 2025, 24.3M activities) should be used, not v35. `chembl_downloader` fetches latest by default.
|
| 26 |
+
18. **Nature MI 2025** — Biologically driven negative subsampling paper independently shows "assumed negatives" distort DTI models. Related: EviDTI (Nature Comms 2025), DDB paper (BMC Biology 2025), LIT-PCBA audit (2025).
|
| 27 |
+
|
| 28 |
+
---
|
| 29 |
+
---
|
| 30 |
+
|
| 31 |
+
## Positive Data Protocol (P0 — Expert Panel Finding)
|
| 32 |
+
|
| 33 |
+
NegBioDB is a negative-only database. For ML benchmarking (Task M1: binary DTI prediction), **positive (active) data is required**. This section defines the protocol.
|
| 34 |
+
|
| 35 |
+
### Positive Data Source
|
| 36 |
+
|
| 37 |
+
```sql
|
| 38 |
+
-- Extract active DTIs from ChEMBL v36 SQLite
|
| 39 |
+
-- Threshold: pChEMBL ≥ 6 (IC50/Ki/Kd/EC50 ≤ 1 uM)
|
| 40 |
+
SELECT
|
| 41 |
+
a.molregno, a.pchembl_value, a.standard_type,
|
| 42 |
+
cs.canonical_smiles, cs.standard_inchi_key,
|
| 43 |
+
cp.accession AS uniprot_id
|
| 44 |
+
FROM activities a
|
| 45 |
+
JOIN compound_structures cs ON a.molregno = cs.molregno
|
| 46 |
+
JOIN assays ass ON a.assay_id = ass.assay_id
|
| 47 |
+
JOIN target_dictionary td ON ass.tid = td.tid
|
| 48 |
+
LEFT JOIN target_components tc ON td.tid = tc.tid
|
| 49 |
+
LEFT JOIN component_sequences cp ON tc.component_id = cp.component_id
|
| 50 |
+
WHERE a.pchembl_value >= 6
|
| 51 |
+
AND a.standard_type IN ('IC50', 'Ki', 'Kd', 'EC50')
|
| 52 |
+
AND a.data_validity_comment IS NULL
|
| 53 |
+
AND td.target_type = 'SINGLE PROTEIN'
|
| 54 |
+
AND cp.accession IS NOT NULL
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
### Positive-Negative Pairing
|
| 58 |
+
|
| 59 |
+
| Setting | Ratio | Purpose | Primary Use |
|
| 60 |
+
|---------|-------|---------|-------------|
|
| 61 |
+
| **Balanced** | 1:1 (active:inactive) | Fair model comparison | Exp 1, Exp 4, baselines |
|
| 62 |
+
| **Realistic** | 1:10 (active:inactive) | Real-world HTS simulation | Supplementary evaluation |
|
| 63 |
+
|
| 64 |
+
- Positives restricted to **shared targets** between ChEMBL actives and NegBioDB inactives (same target pool)
|
| 65 |
+
- Same compound standardization pipeline (RDKit) applied to positives
|
| 66 |
+
- DAVIS matrix known actives (pKd ≥ 7, Kd ≤ 100 nM) used as **Gold-standard validation set**
|
| 67 |
+
|
| 68 |
+
### Overlap Prevention
|
| 69 |
+
|
| 70 |
+
- Active and inactive compound-target pairs must not overlap (same pair cannot be both active and inactive)
|
| 71 |
+
- Borderline zone (pChEMBL 4.5–5.5) excluded from both positive and negative sets for clean separation
|
| 72 |
+
- Overlap analysis: report % of NegBioDB negatives where the same compound appears as active against a different target
|
| 73 |
+
|
| 74 |
+
---
|
| 75 |
+
|
| 76 |
+
## Random Negative Control Design (P0 — Expert Panel Finding)
|
| 77 |
+
|
| 78 |
+
Experiment 1 compares NegBioDB's experimentally confirmed negatives against **random negatives**. The random negative generation must be precisely defined.
|
| 79 |
+
|
| 80 |
+
### Control Conditions for Exp 1
|
| 81 |
+
|
| 82 |
+
| Control | Method | What it Tests |
|
| 83 |
+
|---------|--------|---------------|
|
| 84 |
+
| **Uniform random** | Sample untested compound-target pairs uniformly at random from the full cross-product space | Standard TDC approach; tests baseline inflation |
|
| 85 |
+
| **Degree-matched random** | Sample untested pairs matching the degree distribution of NegBioDB pairs | Isolates the effect of experimental confirmation vs. degree bias |
|
| 86 |
+
|
| 87 |
+
**All Exp 1 runs:**
|
| 88 |
+
- 3 ML models (DeepDTA, GraphDTA, DrugBAN)
|
| 89 |
+
- Random split only (for controlled comparison)
|
| 90 |
+
- Same positive data, same split seed
|
| 91 |
+
- Only the negative set changes: NegBioDB confirmed vs. uniform random vs. degree-matched random
|
| 92 |
+
- **Total: 3 models × 3 negative conditions = 9 runs** (was 3 runs; updated)
|
| 93 |
+
- **Note:** The 3 NegBioDB-negative random-split runs are shared with the baseline count (9 baselines include random split). Thus Exp 1 adds only **6 new runs** (uniform random + degree-matched random). Similarly, Exp 4 shares the random-split baseline and adds only **3 new DDB runs**. Overall: 9 baseline + 6 Exp 1 + 3 Exp 4 = **18 total**.
|
| 94 |
+
- **Exp 4 definition:** The DDB comparison uses a full-task degree-balanced split on the merged M1 balanced benchmark. Positives and negatives are reassigned together under the same split policy.
|
| 95 |
+
|
| 96 |
+
### Reporting
|
| 97 |
+
|
| 98 |
+
- Table: [Model × Negative Source × Metric] for LogAUC, AUPRC, MCC
|
| 99 |
+
- Expected: NegBioDB > degree-matched > uniform random for precision-oriented metrics
|
| 100 |
+
- If NegBioDB ≈ uniform random → narrative shifts to Exp 4 (DDB bias) as primary result
|
| 101 |
+
|
| 102 |
+
---
|
| 103 |
+
|
| 104 |
+
## Phase 1: Implementation Sprint (Weeks 0-11)
|
| 105 |
+
|
| 106 |
+
### Week 1: Scaffolding + Download + Schema ✅ COMPLETE
|
| 107 |
+
|
| 108 |
+
- [x] **Project scaffolding**: Create `src/negbiodb/`, `scripts/`, `tests/`, `migrations/`, `config.yaml`, `Makefile`, `pyproject.toml`
|
| 109 |
+
- [x] **Dependency management**: `pyproject.toml` with Python 3.11+, rdkit, pandas, pyarrow, mlcroissant, tqdm, scikit-learn
|
| 110 |
+
- [x] **Makefile skeleton**: Define target structure (full pipeline encoding in Week 2)
|
| 111 |
+
- [x] Finalize database schema (SQLite for MVP) — apply `migrations/001_initial_schema.sql`
|
| 112 |
+
- [x] Download all source data (see below — < 1 day total)
|
| 113 |
+
- [x] **Verify ChEMBL v36** (Sep 2025) downloaded, not v35
|
| 114 |
+
- [x] **[B7] Verify PubChem bioactivities.tsv.gz column names** after download
|
| 115 |
+
- [ ] **[B4] Hardware decision**: Test local RAM/GPU. If < 32GB RAM → use Llama 3.1 8B + Mistral 7B (not 70B). If ≥ 32GB → quantized Llama 3.3 70B (Q4). Document choice.
|
| 116 |
+
- [ ] **[B2] Verify citations**: Search for Nature MI 2025 negative subsampling paper + Science 2025 editorial. If not found → substitute with EviDTI, DDB paper, LIT-PCBA audit
|
| 117 |
+
- [ ] **[B3] Monitor submission deadlines**
|
| 118 |
+
|
| 119 |
+
### Week 2: Standardization + Extraction Start ✅ COMPLETE
|
| 120 |
+
|
| 121 |
+
- [x] Implement compound standardization pipeline (RDKit: salt removal, normalization, InChIKey)
|
| 122 |
+
- [x] Implement target standardization pipeline (UniProt accession as canonical ID)
|
| 123 |
+
- [x] Set up cross-DB deduplication (InChIKey[0:14] connectivity layer)
|
| 124 |
+
- [x] **Makefile pipeline**: Encode full data pipeline dependency graph as executable Makefile targets
|
| 125 |
+
- [ ] **[B5] Check shared target pool size**: Count intersection of NegBioDB targets ∩ ChEMBL pChEMBL ≥ 6 targets. If < 200 targets → expand NegBioDB target extraction
|
| 126 |
+
- [ ] **[B6] Check borderline exclusion impact**: Run pChEMBL distribution query on ChEMBL. Estimate data loss from excluding pChEMBL 4.5–5.5 zone
|
| 127 |
+
|
| 128 |
+
### Week 2-4: Data Extraction ✅ COMPLETE
|
| 129 |
+
|
| 130 |
+
**Result: 30.5M negative_results — far exceeding the minimum target of 10K curated entries**
|
| 131 |
+
|
| 132 |
+
**Data Sources (License-Safe Only):**
|
| 133 |
+
|
| 134 |
+
| Source | Available Volume | Method | License |
|
| 135 |
+
|--------|-----------------|--------|---------|
|
| 136 |
+
| PubChem BioAssay (confirmatory inactive) | **~61M** (target-annotated) | **FTP bulk: `bioactivities.tsv.gz` (3 GB)** + `bioassays.tsv.gz` (52 MB) | Public domain |
|
| 137 |
+
| ChEMBL pChEMBL < 5 (quality-filtered) | **~527K** records → ~100-200K unique pairs | **SQLite via `chembl_downloader`** (4.6 GB, 1h setup) | CC BY-SA 3.0 |
|
| 138 |
+
| ChEMBL activity_comment "Not Active" | **~763K** (literature-curated) | SQL query on same SQLite dump | CC BY-SA 3.0 |
|
| 139 |
+
| BindingDB (Kd/Ki > 10 uM) | **~30K+** | Bulk TSV download + filter | CC BY |
|
| 140 |
+
| DAVIS complete matrix (pKd ≤ 5) | **~27K** | TDC Python download | Public/academic |
|
| 141 |
+
|
| 142 |
+
**NOT bundled (license issues):**
|
| 143 |
+
- HCDT 2.0 (CC BY-NC-ND) — Use as validation reference only; we use 10 uM threshold (not 100 uM) to differentiate
|
| 144 |
+
- InertDB (CC BY-NC) — Optional download script for users
|
| 145 |
+
|
| 146 |
+
**PubChem FTP extraction pipeline (< 1 day):**
|
| 147 |
+
```
|
| 148 |
+
1. bioassays.tsv.gz → filter confirmatory AIDs with target annotations → ~260K AIDs
|
| 149 |
+
2. bioactivities.tsv.gz (stream) → filter AID ∈ confirmatory, Outcome=Inactive → ~61M records
|
| 150 |
+
3. Prioritize MLPCN/MLSCN assays (~4,500 AIDs, genuine HTS dose-response) for Silver tier
|
| 151 |
+
4. Map SID→CID via Sid2CidSMILES.gz, targets via Aid2GeneidAccessionUniProt.gz
|
| 152 |
+
```
|
| 153 |
+
|
| 154 |
+
- [x] Download PubChem FTP files (bioactivities.tsv.gz + bioassays.tsv.gz + mapping files)
|
| 155 |
+
- [x] Download ChEMBL v36 SQLite via chembl_downloader
|
| 156 |
+
- [x] Download BindingDB bulk TSV
|
| 157 |
+
- [x] Build PubChem FTP extraction script (**streaming with chunksize=100K** — 12GB uncompressed)
|
| 158 |
+
- [x] Build ChEMBL extraction SQL: inactive (activity_comment + pChEMBL < 5) **AND active (pChEMBL ≥ 6)** for positive data
|
| 159 |
+
- [x] Build BindingDB extraction script (filter Kd/Ki > 10 uM, human targets)
|
| 160 |
+
- [x] Integrate DAVIS matrix from TDC (both actives pKd ≥ 7 and inactives pKd ≤ 5)
|
| 161 |
+
- [x] Run compound/target standardization on all extracted data (multiprocessing for RDKit)
|
| 162 |
+
- [x] Run cross-DB deduplication + **overlap analysis** (vs DAVIS, TDC, DUD-E, LIT-PCBA)
|
| 163 |
+
- [x] Assign confidence tiers (gold/silver/bronze/copper — lowercase, matching DDL CHECK constraint)
|
| 164 |
+
- [x] **Extract ChEMBL positives**: 883K → 863K after 21K overlap removal (pChEMBL ≥ 6, shared targets only)
|
| 165 |
+
- [x] **Positive-negative pairing**: M1 balanced (1.73M, 1:1) + M1 realistic (9.49M, 1:10). Zero compound-target overlap verified.
|
| 166 |
+
- [x] **Borderline exclusion**: pChEMBL 4.5–5.5 removed from both pools
|
| 167 |
+
- [x] Spot-check top 100 most-duplicated compounds (manual QC checkpoint)
|
| 168 |
+
- [x] Run data leakage check: cold split leaks = 0, cross-source overlaps documented
|
| 169 |
+
|
| 170 |
+
### Week 3-5: Benchmark Construction (ML + LLM)
|
| 171 |
+
|
| 172 |
+
**ML Track:**
|
| 173 |
+
- [x] Implement 3 must-have splits (Random, Cold-Compound, Cold-Target) + DDB for Exp 4
|
| 174 |
+
- [x] Implement ML evaluation metrics: LogAUC[0.001,0.1], BEDROC, EF@1%, EF@5%, AUPRC, MCC, AUROC
|
| 175 |
+
- [x] (Should have) Add Cold-Both, Temporal, Scaffold splits (all 6 implemented)
|
| 176 |
+
|
| 177 |
+
**LLM Track:** ✅ INFRASTRUCTURE COMPLETE (2026-03-12)
|
| 178 |
+
- [x] Design prompt templates for L1, L2, L4 (priority tasks) → `llm_prompts.py`
|
| 179 |
+
- [x] Construct L1 dataset: 2,000 MCQ from NegBioDB entries → `build_l1_dataset.py`
|
| 180 |
+
- [x] Construct L2 dataset: 116 candidates (semi-automated) → `build_l2_dataset.py`
|
| 181 |
+
- [x] Construct L4 dataset: 500 tested/untested pairs → `build_l4_dataset.py`
|
| 182 |
+
- [x] Implement automated evaluation scripts → `llm_eval.py` (L1: accuracy/F1, L2: entity F1, L4: classification F1)
|
| 183 |
+
- [x] Build compound name cache → `compound_names.parquet` (144,633 names from ChEMBL)
|
| 184 |
+
- [x] Construct L3 dataset: 50 pilot reasoning examples → `build_l3_dataset.py`
|
| 185 |
+
- [x] LLM client (vLLM + Gemini) → `llm_client.py`
|
| 186 |
+
- [x] SLURM templates + batch submission → `run_llm_local.slurm`, `run_llm_gemini.slurm`, `submit_llm_all.sh`
|
| 187 |
+
- [x] Results aggregation → `collect_llm_results.py` (Table 2)
|
| 188 |
+
- [x] 54 new tests (29 eval + 25 dataset), 329 total pass
|
| 189 |
+
- [ ] **L2 gold annotation**: 15–20h human review needed for `l2_gold.jsonl`
|
| 190 |
+
|
| 191 |
+
**Shared:**
|
| 192 |
+
- [ ] Generate Croissant machine-readable metadata (mandatory for submission)
|
| 193 |
+
- [ ] **Validate Croissant** with `mlcroissant` library. Gate: `mlcroissant.Dataset('metadata.json')` runs without errors
|
| 194 |
+
- [ ] Write Datasheet for Datasets (Gebru et al. template)
|
| 195 |
+
|
| 196 |
+
### Week 5-7: Baseline Experiments (ML + LLM)
|
| 197 |
+
|
| 198 |
+
**ML Baselines:**
|
| 199 |
+
|
| 200 |
+
| Model | Type | Priority | Runs (3 splits) | Status |
|
| 201 |
+
|-------|------|----------|-----------------|--------|
|
| 202 |
+
| DeepDTA | Sequence CNN | Must have | 3 | ✅ Implemented |
|
| 203 |
+
| GraphDTA | Graph neural network | Must have | 3 | ✅ Implemented |
|
| 204 |
+
| DrugBAN | Bilinear attention | Must have | 3 | ✅ Implemented |
|
| 205 |
+
| Random Forest | Traditional ML | Should have | 3 | Planned |
|
| 206 |
+
| XGBoost | Traditional ML | Should have | 3 | Planned |
|
| 207 |
+
| DTI-LM | Language model-based | Nice to have | 3 | Planned |
|
| 208 |
+
| EviDTI | Evidential/uncertainty | Nice to have | 3 | Planned |
|
| 209 |
+
|
| 210 |
+
**Must-have ML: 9 baseline runs (3 models × 3 splits) + 6 Exp 1 (2 random conditions) + 3 Exp 4 (DDB split) = 18 total (~36-72 GPU-hours, 3-4 days)**
|
| 211 |
+
|
| 212 |
+
> **Status (2026-03-13):** All 18/18 ML baseline runs COMPLETE on Cayuga HPC. Results in `results/baselines/`. 3 timed-out DrugBAN jobs recovered via `eval_checkpoint.py`. Key findings: degree-matched negatives inflate LogAUC by +0.112 avg; cold-target LogAUC drops to 0.15–0.33; DDB ≈ random (≤0.010 diff).
|
| 213 |
+
|
| 214 |
+
**LLM Baselines (all free):**
|
| 215 |
+
|
| 216 |
+
| Model | Access | Priority |
|
| 217 |
+
|-------|--------|----------|
|
| 218 |
+
| Gemini 2.5 Flash | Free API (250 RPD) | Must have |
|
| 219 |
+
| Llama 3.3 70B | Ollama local | Must have |
|
| 220 |
+
| Mistral 7B | Ollama local | Must have |
|
| 221 |
+
| Phi-3.5 3.8B | Ollama local | Should have |
|
| 222 |
+
| Qwen2.5 7B | Ollama local | Should have |
|
| 223 |
+
|
| 224 |
+
**Must-have LLM: 3 models × 3 tasks (L1,L2,L4) × 2 configs (zero-shot, 3-shot) = 18 eval runs (all automated)**
|
| 225 |
+
|
| 226 |
+
**Flagship models (post-stabilization):**
|
| 227 |
+
- GPT-4/4.1, Claude Sonnet/Opus, Gemini Pro — added to leaderboard later
|
| 228 |
+
|
| 229 |
+
**Must-have experiments (minimum for paper):**
|
| 230 |
+
- [x] **Exp 1: NegBioDB vs. random negatives** ✅ COMPLETE — degree-matched avg +0.112 over negbiodb → benchmark inflation confirmed
|
| 231 |
+
- [x] **Exp 4: Node degree bias** ✅ COMPLETE — DDB ≈ random (≤0.010 diff) → degree balancing alone not harder
|
| 232 |
+
- [ ] **Exp 9: LLM vs. ML comparison** (L1 vs. M1 on matched test set — reuses baseline results; awaiting LLM runs)
|
| 233 |
+
- [ ] **Exp 10: LLM extraction quality** (L2 entity F1 — awaiting LLM runs)
|
| 234 |
+
|
| 235 |
+
**Should-have experiments (strengthen paper, no extra training):**
|
| 236 |
+
- [ ] Exp 5: Cross-database consistency (analysis only, no training)
|
| 237 |
+
- [ ] Exp 7: Target class coverage analysis (analysis only)
|
| 238 |
+
- [ ] Exp 11: Prompt strategy comparison (add CoT config to LLM baselines)
|
| 239 |
+
- [ ] L3 task + Exp 12: LLM-as-Judge reliability (1,530 judge calls = 6 days)
|
| 240 |
+
|
| 241 |
+
**Nice-to-have experiments (defer to camera-ready):**
|
| 242 |
+
- [ ] Exp 2: Confidence tier discrimination
|
| 243 |
+
- [ ] Exp 3: Assay context dependency (with assay format stratification)
|
| 244 |
+
- [ ] Exp 6: Temporal generalization
|
| 245 |
+
- [ ] Exp 8: LIT-PCBA recapitulation
|
| 246 |
+
|
| 247 |
+
### Week 8-10: Paper Writing
|
| 248 |
+
|
| 249 |
+
- [ ] Write benchmark paper (**9 pages** + unlimited appendix)
|
| 250 |
+
- [ ] Create key figures (see `paper/scripts/generate_figures.py`)
|
| 251 |
+
- [ ] **Paper structure (9 pages)**: Intro (1.5) → DB Design (1.5) → Benchmark (1.5) → Experiments (3) → Discussion (1.5)
|
| 252 |
+
- [ ] **Appendix contents**: Full schema DDL, all metric tables, L2 annotation details, few-shot examples, Datasheet
|
| 253 |
+
- [ ] Python download script: `pip install negbiodb` or simple wget script
|
| 254 |
+
- [ ] Host dataset (HuggingFace primary + Zenodo DOI for archival)
|
| 255 |
+
- [ ] Author ethical statement
|
| 256 |
+
- [ ] **Dockerfile** for full pipeline reproducibility: Python 3.11, rdkit, torch, chembl_downloader, pyarrow, mlcroissant. Must reproduce full pipeline from raw data → final benchmark export
|
| 257 |
+
|
| 258 |
+
### Week 10-11: Review & Submit
|
| 259 |
+
|
| 260 |
+
- [ ] Internal review and polish
|
| 261 |
+
- [ ] Submit abstract (~May 1)
|
| 262 |
+
- [ ] Submit full paper (~May 15)
|
| 263 |
+
- [ ] Post ArXiv preprint (same day or before submission)
|
| 264 |
+
|
| 265 |
+
---
|
| 266 |
+
|
| 267 |
+
## Phase 1-CT: Clinical Trial Failure Domain
|
| 268 |
+
|
| 269 |
+
> Initiated: 2026-03-17 | Pipeline code + data loading complete, benchmark design complete
|
| 270 |
+
|
| 271 |
+
### Step CT-1: Infrastructure ✅ COMPLETE
|
| 272 |
+
|
| 273 |
+
- [x] CT schema design (2 migrations: 001 initial + 002 expert review fixes)
|
| 274 |
+
- [x] 5 pipeline modules: etl_aact, etl_classify, drug_resolver, etl_outcomes, ct_db
|
| 275 |
+
- [x] 138 tests passing
|
| 276 |
+
- [x] Data download scripts for all 4 sources
|
| 277 |
+
|
| 278 |
+
### Step CT-2: Data Loading ✅ COMPLETE
|
| 279 |
+
|
| 280 |
+
- [x] AACT ETL: 216,987 trials, 476K trial-interventions, 372K trial-conditions
|
| 281 |
+
- [x] Failure classification (3-tier): 132,925 results (bronze 60K / silver 28K / gold 23K / copper 20K)
|
| 282 |
+
- [x] Open Targets: 32,782 intervention-target mappings
|
| 283 |
+
- [x] Pair aggregation: 102,850 intervention-condition pairs
|
| 284 |
+
|
| 285 |
+
### Step CT-3: Enrichment & Resolution ✅ COMPLETE
|
| 286 |
+
|
| 287 |
+
- [x] Outcome enrichment: +66 AACT p-values, +31,969 Shi & Du SAE records
|
| 288 |
+
- [x] Drug resolution Steps 1-2: ChEMBL exact (18K) + PubChem API
|
| 289 |
+
- [x] Drug resolution Step 3: Fuzzy matching — 15,616 resolved
|
| 290 |
+
- [x] Drug resolution Step 4: Manual overrides — 291 resolved (88 entries used)
|
| 291 |
+
- [x] Pair aggregation refresh (post-resolution) — 102,850 pairs
|
| 292 |
+
- [x] Post-run coverage analysis — 36,361/176,741 (20.6%) ChEMBL, 27,534 SMILES, 66,393 targets
|
| 293 |
+
|
| 294 |
+
### Step CT-4: Analysis & Benchmark Design ✅ COMPLETE
|
| 295 |
+
|
| 296 |
+
- [x] Data quality analysis script (`scripts_ct/analyze_ct_data.py`) — 16 queries, JSON+MD output
|
| 297 |
+
- [x] Data quality report (`results/ct/ct_data_quality.md`)
|
| 298 |
+
- [x] ML benchmark design
|
| 299 |
+
- 3 tasks: CT-M1 (binary), CT-M2 (7-way category), CT-M3 (phase transition, deferred)
|
| 300 |
+
- 6 split strategies, 3 models (XGBoost, MLP, GNN+Tabular)
|
| 301 |
+
- 3 experiments: negative source, generalization, temporal
|
| 302 |
+
- [x] LLM benchmark design
|
| 303 |
+
- 4 levels: CT-L1 (5-way MCQ), CT-L2 (extraction), CT-L3 (reasoning), CT-L4 (discrimination)
|
| 304 |
+
- 5 models, anti-contamination analysis
|
| 305 |
+
|
| 306 |
+
### Step CT-5: ML Export & Splits ✅ COMPLETE
|
| 307 |
+
|
| 308 |
+
- [x] CT export module (`src/negbiodb_ct/ct_export.py`)
|
| 309 |
+
- [x] CTO success trials extraction (CT-M1 positive class)
|
| 310 |
+
- [x] Feature engineering (drug FP + mol properties + condition one-hot + trial design)
|
| 311 |
+
- [x] 6 split strategies implementation
|
| 312 |
+
|
| 313 |
+
### Step CT-6: ML Baseline Experiments ✅ COMPLETE (108/108 runs)
|
| 314 |
+
|
| 315 |
+
- [x] XGBoost baseline (CT-M1 + CT-M2)
|
| 316 |
+
- [x] MLP baseline
|
| 317 |
+
- [x] GNN+Tabular baseline
|
| 318 |
+
- [x] Key finding: CT-M1 trivially separable on NegBioDB negatives (AUROC=1.0); M2 XGBoost macro-F1=0.51
|
| 319 |
+
|
| 320 |
+
### Step CT-7: LLM Benchmark Execution ✅ COMPLETE (80/80 runs)
|
| 321 |
+
|
| 322 |
+
- [x] CT-L1/L2/L3/L4 dataset construction
|
| 323 |
+
- [x] CT prompt templates + evaluation functions
|
| 324 |
+
- [x] Inference runs on Cayuga HPC (5 models × 4 levels × 4 configs)
|
| 325 |
+
- [x] Key finding: CT L4 MCC 0.48–0.56 — highest discrimination across domains
|
| 326 |
+
|
| 327 |
+
---
|
| 328 |
+
|
| 329 |
+
## Phase 1b: Post-Submission Expansion (Months 3-6)
|
| 330 |
+
|
| 331 |
+
### Data Expansion (if not at 10K+ for submission)
|
| 332 |
+
- [ ] Complete PubChem BioAssay extraction (full confirmatory set)
|
| 333 |
+
- [ ] LLM text mining pipeline activation (PubMed abstracts)
|
| 334 |
+
- [ ] Supplementary materials table extraction (pilot)
|
| 335 |
+
|
| 336 |
+
### Benchmark Refinement
|
| 337 |
+
- [ ] Add remaining ML and LLM baseline models
|
| 338 |
+
- [ ] Complete all 12 validation experiments (8 ML + 4 LLM)
|
| 339 |
+
- [ ] Complete LLM tasks L5, L6 datasets
|
| 340 |
+
- [ ] Add flagship LLM evaluations (GPT-4, Claude)
|
| 341 |
+
- [ ] Build public leaderboard (simple GitHub-based, separate ML and LLM tracks)
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
---
|
| 345 |
+
|
| 346 |
+
## Phase 2: Community & Platform (Months 6-18)
|
| 347 |
+
|
| 348 |
+
### 2.1 Platform Development
|
| 349 |
+
- [ ] Web interface (search, browse, download)
|
| 350 |
+
- [ ] Python library: `pip install negbiodb`
|
| 351 |
+
- [ ] REST API with tiered access
|
| 352 |
+
- [ ] Community submission portal with controlled vocabularies
|
| 353 |
+
- [ ] Leaderboard system
|
| 354 |
+
|
| 355 |
+
### 2.2 Community Building
|
| 356 |
+
- [ ] GitHub repository with documentation and tutorials
|
| 357 |
+
- [ ] Partner with SGC and Target 2035/AIRCHECK for data access
|
| 358 |
+
- [ ] Engage with DREAM challenge community
|
| 359 |
+
- [ ] Tutorial at relevant workshop
|
| 360 |
+
- [ ] Researcher incentive design (citation credit, DOI per submission)
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
---
|
| 364 |
+
|
| 365 |
+
## Schema Design
|
| 366 |
+
|
| 367 |
+
### Common Layer
|
| 368 |
+
|
| 369 |
+
```
|
| 370 |
+
NegativeResult {
|
| 371 |
+
id: UUID
|
| 372 |
+
compound_id: InChIKey + ChEMBL ID + PubChem CID
|
| 373 |
+
target_id: UniProt ID + ChEMBL Target ID
|
| 374 |
+
|
| 375 |
+
// Core negative result
|
| 376 |
+
result_type: ENUM [hard_negative, conditional_negative, methodological_negative,
|
| 377 |
+
hypothesis_negative, dose_time_negative]
|
| 378 |
+
confidence_tier: ENUM [gold, silver, bronze, copper]
|
| 379 |
+
|
| 380 |
+
// Quantitative evidence
|
| 381 |
+
activity_value: FLOAT (IC50, Kd, Ki, EC50)
|
| 382 |
+
activity_unit: STRING
|
| 383 |
+
activity_type: STRING
|
| 384 |
+
pchembl_value: FLOAT
|
| 385 |
+
inactivity_threshold: FLOAT
|
| 386 |
+
max_concentration_tested: FLOAT
|
| 387 |
+
|
| 388 |
+
// Assay context (BAO-based)
|
| 389 |
+
assay_type: BAO term
|
| 390 |
+
assay_format: ENUM [biochemical, cell-based, in_vivo]
|
| 391 |
+
assay_technology: STRING
|
| 392 |
+
detection_method: STRING
|
| 393 |
+
cell_line: STRING (if cell-based)
|
| 394 |
+
organism: STRING
|
| 395 |
+
|
| 396 |
+
// Quality metrics
|
| 397 |
+
z_factor: FLOAT
|
| 398 |
+
ssmd: FLOAT
|
| 399 |
+
num_replicates: INT
|
| 400 |
+
screen_type: ENUM [primary_single_point, confirmatory_dose_response,
|
| 401 |
+
counter_screen, orthogonal_assay]
|
| 402 |
+
|
| 403 |
+
// Provenance
|
| 404 |
+
source_db: STRING (PubChem, ChEMBL, literature, community)
|
| 405 |
+
source_id: STRING (assay ID, paper DOI)
|
| 406 |
+
extraction_method: ENUM [database_direct, text_mining, llm_extracted,
|
| 407 |
+
community_submitted]
|
| 408 |
+
curator_validated: BOOLEAN
|
| 409 |
+
|
| 410 |
+
// Target context (DTO-based)
|
| 411 |
+
target_type: DTO term
|
| 412 |
+
target_family: STRING (kinase, GPCR, ion_channel, etc.)
|
| 413 |
+
target_development_level: ENUM [Tclin, Tchem, Tbio, Tdark]
|
| 414 |
+
|
| 415 |
+
// Metadata
|
| 416 |
+
created_at: TIMESTAMP
|
| 417 |
+
updated_at: TIMESTAMP
|
| 418 |
+
related_positive_results: [UUID] (links to known actives for same target)
|
| 419 |
+
}
|
| 420 |
+
```
|
| 421 |
+
|
| 422 |
+
### Biology/DTI Domain Layer
|
| 423 |
+
|
| 424 |
+
```
|
| 425 |
+
DTIContext {
|
| 426 |
+
negative_result_id: UUID (FK)
|
| 427 |
+
binding_site: STRING (orthosteric, allosteric, unknown)
|
| 428 |
+
selectivity_data: BOOLEAN (part of selectivity panel?)
|
| 429 |
+
species_tested: STRING
|
| 430 |
+
counterpart_species_result: STRING (active in other species?)
|
| 431 |
+
cell_permeability_issue: BOOLEAN
|
| 432 |
+
compound_solubility: FLOAT
|
| 433 |
+
compound_stability: STRING
|
| 434 |
+
}
|
| 435 |
+
```
|
| 436 |
+
|
| 437 |
+
---
|
| 438 |
+
|
| 439 |
+
## Benchmark Design (NegBioBench) — Dual ML + LLM Track
|
| 440 |
+
|
| 441 |
+
### Track A: Traditional ML Tasks
|
| 442 |
+
|
| 443 |
+
| Task | Input | Output | Primary Metric |
|
| 444 |
+
|------|-------|--------|----------------|
|
| 445 |
+
| **M1: DTI Binary Prediction** | (compound SMILES, target sequence) | Active / Inactive | LogAUC[0.001,0.1], AUPRC |
|
| 446 |
+
| **M2: Negative Confidence Prediction** | (SMILES, sequence, assay features) | gold/silver/bronze/copper | Weighted F1, MCC |
|
| 447 |
+
| **M3: Activity Value Regression** | (SMILES, sequence) | pIC50 / pKd | RMSE, R², Spearman ρ |
|
| 448 |
+
|
| 449 |
+
**ML Baselines:** DeepDTA, GraphDTA, DrugBAN, RF, XGBoost, DTI-LM, EviDTI
|
| 450 |
+
|
| 451 |
+
### Track B: LLM Tasks
|
| 452 |
+
|
| 453 |
+
| Task | Input | Output | Metric | Eval Method |
|
| 454 |
+
|------|-------|--------|--------|-------------|
|
| 455 |
+
| **L1: Negative DTI Classification** | Natural language description | Active/Inactive/Inconclusive/Conditional (MCQ) | Accuracy, F1, MCC | Automated |
|
| 456 |
+
| **L2: Negative Result Extraction** | Paper abstract | Structured JSON (compound, target, outcome) | Schema compliance, Entity F1, STED | Automated |
|
| 457 |
+
| **L3: Inactivity Reasoning** | Confirmed negative + context | Scientific explanation | 4-dim rubric (accuracy, reasoning, completeness, specificity) | LLM-as-Judge + human sample |
|
| 458 |
+
| **L4: Tested-vs-Untested Discrimination** | Compound-target pairs | Tested/Untested + evidence | Accuracy, F1, evidence quality | Automated + spot-check |
|
| 459 |
+
| **L5: Assay Context Reasoning** | Negative result + condition changes | Prediction + reasoning per scenario | Prediction accuracy, reasoning quality | LLM-as-Judge |
|
| 460 |
+
| **L6: Evidence Quality Assessment** | Negative result + metadata | Confidence tier + justification | Tier F1, justification quality | Automated + LLM-judge |
|
| 461 |
+
|
| 462 |
+
**LLM Baselines (Phase 1 — Free):** Gemini 2.5 Flash, Llama 3.3, Mistral 7B, Phi-3.5, Qwen2.5
|
| 463 |
+
**LLM Baselines (Phase 2 — Flagship):** GPT-4, Claude Sonnet/Opus, Gemini Pro
|
| 464 |
+
**LLM-as-Judge:** Gemini 2.5 Flash free tier (validated against human annotations)
|
| 465 |
+
|
| 466 |
+
### Track C: Cross-Track (Future)
|
| 467 |
+
|
| 468 |
+
| Task | Description |
|
| 469 |
+
|------|-------------|
|
| 470 |
+
| **C1: Ensemble Prediction** | Combine ML model scores + LLM reasoning — does LLM improve ML? |
|
| 471 |
+
|
| 472 |
+
### Splitting Strategies (7 total, for Track A)
|
| 473 |
+
1. Random (stratified 70/10/20)
|
| 474 |
+
2. Cold compound (Butina clustering on Murcko scaffolds)
|
| 475 |
+
3. Cold target (by UniProt accession)
|
| 476 |
+
4. Cold both (compound + target unseen)
|
| 477 |
+
5. Temporal (train < 2020, val 2020-2022, test > 2022)
|
| 478 |
+
6. Scaffold (Murcko scaffold cluster-based)
|
| 479 |
+
7. DDB — Degree Distribution Balanced (addresses node degree bias)
|
| 480 |
+
|
| 481 |
+
### Evaluation Metrics (Track A)
|
| 482 |
+
|
| 483 |
+
| Metric | Type | Role |
|
| 484 |
+
|--------|------|------|
|
| 485 |
+
| **LogAUC[0.001,0.1]** | Enrichment | **Primary ranking metric** |
|
| 486 |
+
| **BEDROC (α=20)** | Enrichment | Early enrichment |
|
| 487 |
+
| **EF@1%, EF@5%** | Enrichment | Top-ranked performance |
|
| 488 |
+
| **AUPRC** | Ranking | **Secondary ranking metric** |
|
| 489 |
+
| **MCC** | Classification | Balanced classification |
|
| 490 |
+
| **AUROC** | Ranking | Backward compatibility only (not for ranking) |
|
| 491 |
+
|
| 492 |
+
### LLM Evaluation Configuration
|
| 493 |
+
- **Full benchmark** (5 configs): zero-shot, 3-shot, 5-shot, CoT, CoT+3-shot
|
| 494 |
+
- **Must-have** (2 configs): zero-shot, 3-shot only (see research/08 §3)
|
| 495 |
+
- **Should-have** (add CoT): 3 configs total for Exp 11 (prompt strategy comparison)
|
| 496 |
+
- 3 runs per evaluation, report mean ± std
|
| 497 |
+
- Temperature = 0, prompts version-controlled
|
| 498 |
+
- Anti-contamination: temporal holdout + paraphrased variants + contamination detection
|
| 499 |
+
|
| 500 |
+
---
|
| 501 |
+
|
| 502 |
+
## Phase 3: Scale & Sustainability (Months 18-36)
|
| 503 |
+
|
| 504 |
+
### 3.1 Data Expansion
|
| 505 |
+
- [ ] Expand to 100K+ curated negative DTIs
|
| 506 |
+
- [ ] Full LLM-based literature mining pipeline (PubMed/PMC)
|
| 507 |
+
- [ ] Supplementary materials table extraction (Table Transformer)
|
| 508 |
+
- [ ] Integrate Target 2035 AIRCHECK data as it becomes available
|
| 509 |
+
- [ ] Begin Gene Function (KO/KD) negative data collection
|
| 510 |
+
|
| 511 |
+
### 3.2 Benchmark Evolution (NegBioBench v1.0)
|
| 512 |
+
- [ ] Track A expansion: multi-modal integration (protein structures, assay images)
|
| 513 |
+
- [ ] Track B expansion: additional tasks — Failure Diagnosis, Experimental Design Critique, Literature Contradiction Detection
|
| 514 |
+
- [ ] Track C: Cross-track ensemble evaluation (ML + LLM combined prediction)
|
| 515 |
+
- [ ] Specialized bio-LLM evaluations (LlaSMol, BioMedGPT, DrugChat)
|
| 516 |
+
- [ ] Regular leaderboard updates (both ML and LLM tracks)
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
---
|
| 520 |
+
|
| 521 |
+
## Phase 4: Domain Expansion (Months 36+)
|
| 522 |
+
|
| 523 |
+
```
|
| 524 |
+
DTI (Phase 1 — COMPLETE)
|
| 525 |
+
│
|
| 526 |
+
├── Clinical Trial Failure (Phase 1-CT — COMPLETE ✅)
|
| 527 |
+
│ └── 132,925 failure results loaded, benchmarks designed
|
| 528 |
+
│
|
| 529 |
+
├── Gene Function (CRISPR KO/KD negatives)
|
| 530 |
+
│ └── Leverage CRISPR screen data, DepMap
|
| 531 |
+
│
|
| 532 |
+
├── Chemistry Domain Layer
|
| 533 |
+
│ └── Failed reactions, yield = 0 data
|
| 534 |
+
│
|
| 535 |
+
└── Materials Science Domain Layer
|
| 536 |
+
└── HTEM DB integration, failed synthesis conditions
|
| 537 |
+
```
|
| 538 |
+
|
| 539 |
+
---
|
| 540 |
+
|
| 541 |
+
## Key Milestones (Revised)
|
| 542 |
+
|
| 543 |
+
| Milestone | Target Date | Deliverable | Status |
|
| 544 |
+
|-----------|------------|-------------|--------|
|
| 545 |
+
| Schema v1.0 finalized | Week 2 (Mar 2026) | SQLite schema + standardization pipeline | ✅ Done |
|
| 546 |
+
| Data extraction complete | Week 3-4 (Mar 2026) | **30.5M** negative results (far exceeded 10K target) | ✅ Done |
|
| 547 |
+
| ML export & splits | Week 3 (Mar 2026) | 6 split strategies + M1 benchmark datasets | ✅ Done |
|
| 548 |
+
| ML evaluation metrics | Week 3 (Mar 2026) | 7 metrics, 329 tests | ✅ Done |
|
| 549 |
+
| ML baseline infrastructure | Week 4 (Mar 2026) | 3 models + SLURM harness | ✅ Done |
|
| 550 |
+
| ML baseline experiments | Week 5 (Mar 2026) | 18/18 runs complete, key findings confirmed | ✅ Done |
|
| 551 |
+
| LLM benchmark infrastructure | Week 5 (Mar 2026) | L1–L4 datasets, prompts, eval, SLURM templates | ✅ Done |
|
| 552 |
+
| LLM benchmark execution | Week 5-6 (Mar 2026) | 81/81 runs complete (9 models × 4 tasks + configs) | ✅ Done |
|
| 553 |
+
| Python library v0.1 | Month 8 | `pip install negbiodb` | Planned |
|
| 554 |
+
| Web platform launch | Month 12 | Public access + leaderboard | Planned |
|
| 555 |
+
| 100K+ entries | Month 24 | Scale milestone | Planned |
|
| 556 |
+
|
| 557 |
+
---
|
| 558 |
+
|
| 559 |
+
---
|
| 560 |
+
|
| 561 |
+
---
|
config.yaml
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NegBioDB Configuration
|
| 2 |
+
# Source of truth for thresholds, paths, and pipeline settings.
|
| 3 |
+
|
| 4 |
+
# === Inactivity Thresholds ===
|
| 5 |
+
inactivity_threshold_nm: 10000 # 10 uM in nM; compound inactive if IC50/Ki/Kd > this
|
| 6 |
+
positive_pchembl_min: 6.0 # pChEMBL >= 6 for positives (IC50 <= 1 uM)
|
| 7 |
+
borderline_exclusion:
|
| 8 |
+
lower: 4.5 # Exclude pChEMBL >= 4.5 from negative pool
|
| 9 |
+
upper: 6.0 # Matches positive_pchembl_min; effective gap: [4.5, 6.0)
|
| 10 |
+
davis_inactive_pkd_threshold: 5.0 # DAVIS: inactive if pKd <= 5 (NOT strictly < 5)
|
| 11 |
+
davis_active_pkd_threshold: 7.0 # DAVIS: active if pKd >= 7 (Kd <= 100 nM)
|
| 12 |
+
|
| 13 |
+
# === ChEMBL ETL Policy ===
|
| 14 |
+
# Core benchmark should remain conservative by default.
|
| 15 |
+
# Optional activity_comment route can be enabled for recall-focused expansion.
|
| 16 |
+
chembl_etl:
|
| 17 |
+
include_activity_comment: false
|
| 18 |
+
inactive_activity_comments:
|
| 19 |
+
- "Not Active"
|
| 20 |
+
- "Inactive"
|
| 21 |
+
- "Not active"
|
| 22 |
+
- "inactive"
|
| 23 |
+
|
| 24 |
+
# === ML Split Configuration ===
|
| 25 |
+
split_ratios:
|
| 26 |
+
train: 0.7
|
| 27 |
+
val: 0.1
|
| 28 |
+
test: 0.2
|
| 29 |
+
random_seed: 42
|
| 30 |
+
|
| 31 |
+
# === Processing ===
|
| 32 |
+
pubchem_chunksize: 100000 # Rows per chunk for streaming PubChem processing
|
| 33 |
+
|
| 34 |
+
pubchem_etl:
|
| 35 |
+
chunksize: 100000
|
| 36 |
+
human_only: true
|
| 37 |
+
|
| 38 |
+
bindingdb_etl:
|
| 39 |
+
chunksize: 100000
|
| 40 |
+
inactive_threshold_nm: 10000
|
| 41 |
+
human_only: true
|
| 42 |
+
|
| 43 |
+
# === Confidence Tier Ordering (gold = highest) ===
|
| 44 |
+
confidence_tiers:
|
| 45 |
+
- gold
|
| 46 |
+
- silver
|
| 47 |
+
- bronze
|
| 48 |
+
- copper
|
| 49 |
+
|
| 50 |
+
# === Paths (relative to project root) ===
|
| 51 |
+
paths:
|
| 52 |
+
data_dir: data
|
| 53 |
+
exports_dir: exports
|
| 54 |
+
migrations_dir: migrations
|
| 55 |
+
database: data/negbiodb.db
|
| 56 |
+
|
| 57 |
+
# === Download URLs ===
|
| 58 |
+
downloads:
|
| 59 |
+
pubchem:
|
| 60 |
+
# Legacy keys kept for backward compatibility
|
| 61 |
+
url: "https://ftp.ncbi.nlm.nih.gov/pubchem/Bioassay/Extras/bioactivities.tsv.gz"
|
| 62 |
+
dest: data/pubchem/bioactivities.tsv.gz
|
| 63 |
+
min_size_bytes: 1000000000
|
| 64 |
+
# Required files for full PubChem ETL
|
| 65 |
+
bioassays_url: "https://ftp.ncbi.nlm.nih.gov/pubchem/Bioassay/Extras/bioassays.tsv.gz"
|
| 66 |
+
bioassays_dest: data/pubchem/bioassays.tsv.gz
|
| 67 |
+
bioassays_min_size_bytes: 10000000
|
| 68 |
+
aid_uniprot_url: "https://ftp.ncbi.nlm.nih.gov/pubchem/Bioassay/Extras/Aid2GeneidAccessionUniProt.gz"
|
| 69 |
+
aid_uniprot_dest: data/pubchem/Aid2GeneidAccessionUniProt.gz
|
| 70 |
+
aid_uniprot_min_size_bytes: 1000000
|
| 71 |
+
sid_cid_smiles_url: "https://ftp.ncbi.nlm.nih.gov/pubchem/Bioassay/Extras/Sid2CidSMILES.gz"
|
| 72 |
+
sid_cid_smiles_dest: data/pubchem/Sid2CidSMILES.gz
|
| 73 |
+
sid_cid_smiles_min_size_bytes: 1000000
|
| 74 |
+
sid_lookup_db: data/pubchem/sid_lookup.sqlite
|
| 75 |
+
chembl:
|
| 76 |
+
dest_dir: data/chembl
|
| 77 |
+
bindingdb:
|
| 78 |
+
servlet_url: "https://www.bindingdb.org/rwd/bind/chemsearch/marvin/SDFdownload.jsp?download_file=/rwd/bind/downloads/BindingDB_All_202603_tsv.zip"
|
| 79 |
+
file_url: "https://www.bindingdb.org/rwd/bind/downloads/BindingDB_All_202603_tsv.zip"
|
| 80 |
+
dest_dir: data/bindingdb
|
| 81 |
+
min_size_bytes: 500000000
|
| 82 |
+
davis:
|
| 83 |
+
base_url: "https://raw.githubusercontent.com/dingyan20/Davis-Dataset-for-DTA-Prediction/master"
|
| 84 |
+
files:
|
| 85 |
+
- drugs.csv
|
| 86 |
+
- proteins.csv
|
| 87 |
+
- drug_protein_affinity.csv
|
| 88 |
+
dest_dir: data/davis
|
| 89 |
+
min_rows: 25000
|
| 90 |
+
|
| 91 |
+
# ============================================================
|
| 92 |
+
# Clinical Trial Failure Domain
|
| 93 |
+
# ============================================================
|
| 94 |
+
|
| 95 |
+
ct_domain:
|
| 96 |
+
# === Paths ===
|
| 97 |
+
paths:
|
| 98 |
+
database: data/negbiodb_ct.db
|
| 99 |
+
migrations_dir: migrations_ct
|
| 100 |
+
data_dir: data/ct
|
| 101 |
+
|
| 102 |
+
# === Downloads ===
|
| 103 |
+
downloads:
|
| 104 |
+
aact:
|
| 105 |
+
# AACT pipe-delimited snapshot; URL changes monthly — update as needed
|
| 106 |
+
# Get latest from: https://aact.ctti-clinicaltrials.org/pipe_files
|
| 107 |
+
dest_dir: data/ct/aact
|
| 108 |
+
required_disk_gb: 6.0
|
| 109 |
+
# 13 of 46 AACT tables needed for v1
|
| 110 |
+
tables:
|
| 111 |
+
- studies
|
| 112 |
+
- interventions
|
| 113 |
+
- conditions
|
| 114 |
+
- outcomes
|
| 115 |
+
- outcome_analyses
|
| 116 |
+
- outcome_measurements
|
| 117 |
+
- designs
|
| 118 |
+
- sponsors
|
| 119 |
+
- calculated_values
|
| 120 |
+
- browse_interventions
|
| 121 |
+
- browse_conditions
|
| 122 |
+
- drop_withdrawals
|
| 123 |
+
- documents
|
| 124 |
+
opentargets:
|
| 125 |
+
url: "https://huggingface.co/api/datasets/opentargets/clinical_trial_reason_to_stop/parquet/default/train/0000.parquet"
|
| 126 |
+
dest: data/ct/opentargets/stop_reasons.parquet
|
| 127 |
+
cto:
|
| 128 |
+
url: "https://huggingface.co/api/datasets/chufangao/CTO/parquet/human_labels/test/0.parquet"
|
| 129 |
+
dest: data/ct/cto/cto_outcomes.parquet
|
| 130 |
+
shi_du:
|
| 131 |
+
# Figshare: Shi & Du 2024, doi:10.6084/m9.figshare.c.6860254.v1
|
| 132 |
+
efficacy_url: "https://ndownloader.figshare.com/files/42520528"
|
| 133 |
+
efficacy_dest: data/ct/shi_du/efficacy_df.csv
|
| 134 |
+
safety_url: "https://ndownloader.figshare.com/files/42520534"
|
| 135 |
+
safety_dest: data/ct/shi_du/safety_df.csv
|
| 136 |
+
|
| 137 |
+
# === Failure Classification ===
|
| 138 |
+
failure_category_precedence:
|
| 139 |
+
- safety
|
| 140 |
+
- efficacy
|
| 141 |
+
- pharmacokinetic
|
| 142 |
+
- enrollment
|
| 143 |
+
- strategic
|
| 144 |
+
- regulatory
|
| 145 |
+
- design
|
| 146 |
+
- other
|
| 147 |
+
|
| 148 |
+
# === Drug Name Resolution ===
|
| 149 |
+
drug_resolution:
|
| 150 |
+
fuzzy_threshold: 0.90
|
| 151 |
+
pubchem_rate_limit_per_sec: 5
|
| 152 |
+
pubchem_cache: data/ct/pubchem_name_cache.json
|
| 153 |
+
overrides_file: data/ct/drug_name_overrides.csv
|
| 154 |
+
|
| 155 |
+
# === NLP Classifier ===
|
| 156 |
+
classifier:
|
| 157 |
+
max_features: 10000
|
| 158 |
+
ngram_range: [1, 2]
|
| 159 |
+
cv_folds: 5
|
| 160 |
+
|
| 161 |
+
# ============================================================
|
| 162 |
+
# Protein-Protein Interaction Negative Domain
|
| 163 |
+
# ============================================================
|
| 164 |
+
|
| 165 |
+
ppi_domain:
|
| 166 |
+
# === Paths ===
|
| 167 |
+
paths:
|
| 168 |
+
database: data/negbiodb_ppi.db
|
| 169 |
+
migrations_dir: migrations_ppi
|
| 170 |
+
data_dir: data/ppi
|
| 171 |
+
|
| 172 |
+
# === Downloads ===
|
| 173 |
+
downloads:
|
| 174 |
+
huri:
|
| 175 |
+
hi_union_url: "https://www.interactome-atlas.org/data/HI-union.tsv"
|
| 176 |
+
# hi_iii_url removed — HI-III-20.tsv returns 404 as of 2026-03
|
| 177 |
+
idmapping_url: "https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/by_organism/HUMAN_9606_idmapping.dat.gz"
|
| 178 |
+
dest_dir: data/ppi/huri
|
| 179 |
+
intact:
|
| 180 |
+
url: "https://ftp.ebi.ac.uk/pub/databases/intact/current/psimitab/intact_negative.txt"
|
| 181 |
+
dest: data/ppi/intact/intact_negative.txt
|
| 182 |
+
min_size_bytes: 1000000
|
| 183 |
+
humap:
|
| 184 |
+
base_url: "https://humap3.proteincomplexes.org/static/downloads/humap3"
|
| 185 |
+
neg_train: ComplexPortal_reduced_20230309.neg_train_ppis.txt
|
| 186 |
+
neg_test: ComplexPortal_reduced_20230309.neg_test_ppis.txt
|
| 187 |
+
pos_train: ComplexPortal_reduced_20230309.train_ppis.txt
|
| 188 |
+
pos_test: ComplexPortal_reduced_20230309.test_ppis.txt
|
| 189 |
+
dest_dir: data/ppi/humap
|
| 190 |
+
string:
|
| 191 |
+
links_url: "https://stringdb-downloads.org/download/protein.links.v12.0/9606.protein.links.v12.0.txt.gz"
|
| 192 |
+
mapping_url: "https://stringdb-downloads.org/download/mapping_files/uniprot/human.uniprot_2_string.2018.tsv.gz"
|
| 193 |
+
dest_dir: data/ppi/string
|
| 194 |
+
biogrid:
|
| 195 |
+
url: "https://downloads.thebiogrid.org/Download/BioGRID/Latest-Release/BIOGRID-ALL-LATEST.tab3.zip"
|
| 196 |
+
dest_dir: data/ppi/biogrid
|
| 197 |
+
|
| 198 |
+
# === Evidence Type Priority (best first) ===
|
| 199 |
+
evidence_type_priority:
|
| 200 |
+
- experimental_non_interaction
|
| 201 |
+
- literature_reported
|
| 202 |
+
- ml_predicted_negative
|
| 203 |
+
- low_score_negative
|
| 204 |
+
- compartment_separated
|
| 205 |
+
|
| 206 |
+
# === STRING ETL Settings ===
|
| 207 |
+
string_etl:
|
| 208 |
+
min_protein_degree: 5
|
| 209 |
+
max_negative_pairs: 500000
|
| 210 |
+
|
| 211 |
+
# === Benchmark Split ===
|
| 212 |
+
split_ratios: {train: 0.7, val: 0.1, test: 0.2}
|
| 213 |
+
random_seed: 42
|
docs/appendix_prompts.md
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Appendix A: LLM Benchmark Prompt Templates
|
| 2 |
+
|
| 3 |
+
This appendix documents all prompt templates used in the NegBioDB LLM benchmark (tasks L1--L4). Templates are reproduced verbatim from `src/negbiodb/llm_prompts.py` and `src/negbiodb/llm_eval.py`.
|
| 4 |
+
|
| 5 |
+
## A.1 System Prompt (Shared Across All Tasks)
|
| 6 |
+
|
| 7 |
+
```
|
| 8 |
+
You are a pharmaceutical scientist with expertise in drug-target interactions, assay development, and medicinal chemistry. Provide precise, evidence-based answers.
|
| 9 |
+
```
|
| 10 |
+
|
| 11 |
+
## A.2 L1: Activity Classification (Multiple Choice)
|
| 12 |
+
|
| 13 |
+
### A.2.1 Zero-Shot Template
|
| 14 |
+
|
| 15 |
+
```
|
| 16 |
+
{context}
|
| 17 |
+
```
|
| 18 |
+
|
| 19 |
+
### A.2.2 Few-Shot Template
|
| 20 |
+
|
| 21 |
+
```
|
| 22 |
+
Here are some examples of drug-target interaction classification:
|
| 23 |
+
|
| 24 |
+
{examples}
|
| 25 |
+
|
| 26 |
+
Now classify the following:
|
| 27 |
+
|
| 28 |
+
{context}
|
| 29 |
+
```
|
| 30 |
+
|
| 31 |
+
### A.2.3 Answer Format Instruction
|
| 32 |
+
|
| 33 |
+
```
|
| 34 |
+
Respond with ONLY the letter of the correct answer: A, B, C, or D.
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
The answer format instruction is appended after both zero-shot and few-shot templates.
|
| 38 |
+
|
| 39 |
+
## A.3 L2: Structured Extraction
|
| 40 |
+
|
| 41 |
+
### A.3.1 Zero-Shot Template
|
| 42 |
+
|
| 43 |
+
```
|
| 44 |
+
Extract all negative drug-target interaction results from the following abstract.
|
| 45 |
+
|
| 46 |
+
Abstract:
|
| 47 |
+
{abstract_text}
|
| 48 |
+
|
| 49 |
+
For each negative result found, extract:
|
| 50 |
+
- compound: compound/drug name
|
| 51 |
+
- target: target protein/gene name
|
| 52 |
+
- target_uniprot: UniProt accession (if determinable)
|
| 53 |
+
- activity_type: type of measurement (IC50, Ki, Kd, EC50, etc.)
|
| 54 |
+
- activity_value: reported value with units
|
| 55 |
+
- activity_relation: relation (=, >, <, ~)
|
| 56 |
+
- assay_format: biochemical, cell-based, or in vivo
|
| 57 |
+
- outcome: inactive, weak, or inconclusive
|
| 58 |
+
|
| 59 |
+
Also report:
|
| 60 |
+
- total_inactive_count: total number of inactive results mentioned
|
| 61 |
+
- positive_results_mentioned: true/false
|
| 62 |
+
|
| 63 |
+
Respond in JSON format.
|
| 64 |
+
```
|
| 65 |
+
|
| 66 |
+
### A.3.2 Few-Shot Template
|
| 67 |
+
|
| 68 |
+
```
|
| 69 |
+
Extract negative drug-target interaction results from abstracts.
|
| 70 |
+
|
| 71 |
+
{examples}
|
| 72 |
+
|
| 73 |
+
Now extract from this abstract:
|
| 74 |
+
|
| 75 |
+
Abstract:
|
| 76 |
+
{abstract_text}
|
| 77 |
+
|
| 78 |
+
Respond in JSON format.
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
Few-shot examples include the abstract text and the corresponding gold extraction in JSON format, separated by `---` delimiters.
|
| 82 |
+
|
| 83 |
+
## A.4 L3: Scientific Reasoning
|
| 84 |
+
|
| 85 |
+
### A.4.1 Zero-Shot Template
|
| 86 |
+
|
| 87 |
+
```
|
| 88 |
+
{context}
|
| 89 |
+
|
| 90 |
+
Provide a detailed scientific explanation (3-5 paragraphs) covering:
|
| 91 |
+
1. Structural compatibility between compound and target binding site
|
| 92 |
+
2. Known selectivity profile and mechanism of action
|
| 93 |
+
3. Relevant SAR (structure-activity relationship) data
|
| 94 |
+
4. Pharmacological context and therapeutic implications
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
### A.4.2 Few-Shot Template
|
| 98 |
+
|
| 99 |
+
```
|
| 100 |
+
Here are examples of scientific reasoning about inactive drug-target interactions:
|
| 101 |
+
|
| 102 |
+
{examples}
|
| 103 |
+
|
| 104 |
+
Now explain the following:
|
| 105 |
+
|
| 106 |
+
{context}
|
| 107 |
+
```
|
| 108 |
+
|
| 109 |
+
### A.4.3 LLM-as-Judge Rubric
|
| 110 |
+
|
| 111 |
+
Responses are evaluated by a judge model (Gemini 2.5 Flash) using the following rubric:
|
| 112 |
+
|
| 113 |
+
```
|
| 114 |
+
Rate the following scientific explanation of why a compound is inactive against a target.
|
| 115 |
+
|
| 116 |
+
Compound: {compound_name}
|
| 117 |
+
Target: {target_gene} ({target_uniprot})
|
| 118 |
+
|
| 119 |
+
Explanation to evaluate:
|
| 120 |
+
{response}
|
| 121 |
+
|
| 122 |
+
Rate on these 4 dimensions (1-5 each):
|
| 123 |
+
1. Accuracy: Are the scientific claims factually correct?
|
| 124 |
+
2. Reasoning: Is the logical chain from structure to inactivity sound?
|
| 125 |
+
3. Completeness: Are all relevant factors considered (binding, selectivity, SAR)?
|
| 126 |
+
4. Specificity: Does the explanation use specific molecular details, not generalities?
|
| 127 |
+
|
| 128 |
+
Respond in JSON: {{"accuracy": X, "reasoning": X, "completeness": X, "specificity": X}}
|
| 129 |
+
```
|
| 130 |
+
|
| 131 |
+
The judge returns scores as JSON with four dimensions (accuracy, reasoning, completeness, specificity), each rated 1--5.
|
| 132 |
+
|
| 133 |
+
## A.5 L4: Tested vs Untested Discrimination
|
| 134 |
+
|
| 135 |
+
### A.5.1 Zero-Shot Template
|
| 136 |
+
|
| 137 |
+
```
|
| 138 |
+
{context}
|
| 139 |
+
```
|
| 140 |
+
|
| 141 |
+
### A.5.2 Few-Shot Template
|
| 142 |
+
|
| 143 |
+
```
|
| 144 |
+
Here are examples of tested/untested compound-target pair determination:
|
| 145 |
+
|
| 146 |
+
{examples}
|
| 147 |
+
|
| 148 |
+
Now determine:
|
| 149 |
+
|
| 150 |
+
{context}
|
| 151 |
+
```
|
| 152 |
+
|
| 153 |
+
### A.5.3 Answer Format Instruction
|
| 154 |
+
|
| 155 |
+
```
|
| 156 |
+
Respond with 'tested' or 'untested' on the first line. If tested, provide the evidence source (database, assay ID, or DOI) on the next line.
|
| 157 |
+
```
|
| 158 |
+
|
| 159 |
+
The answer format instruction is appended after both zero-shot and few-shot templates.
|
| 160 |
+
|
| 161 |
+
## A.6 Model Configuration
|
| 162 |
+
|
| 163 |
+
| Parameter | Value |
|
| 164 |
+
|-----------|-------|
|
| 165 |
+
| Temperature | 0.0 (deterministic) |
|
| 166 |
+
| Max output tokens | 1024 (L1/L4), 2048 (L2/L3) |
|
| 167 |
+
| Few-shot sets | 3 independent sets (fs0, fs1, fs2) |
|
| 168 |
+
| Retry policy | Exponential backoff, max 8 retries |
|
| 169 |
+
|
| 170 |
+
### Models
|
| 171 |
+
|
| 172 |
+
| Model | Provider | Inference |
|
| 173 |
+
|-------|----------|-----------|
|
| 174 |
+
| Claude Haiku-4.5 | Anthropic API | Cloud |
|
| 175 |
+
| Gemini 2.5 Flash | Google Gemini API | Cloud |
|
| 176 |
+
| GPT-4o-mini | OpenAI API | Cloud |
|
| 177 |
+
| Qwen2.5-7B-Instruct | vLLM | Local (A100 GPU) |
|
| 178 |
+
| Llama-3.1-8B-Instruct | vLLM | Local (A100 GPU) |
|
| 179 |
+
|
| 180 |
+
Gemini 2.5 Flash uses `thinkingConfig: {thinkingBudget: 0}` to disable internal reasoning tokens and ensure the full output budget is available for the response.
|
docs/methodology_notes.md
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Methodology Notes for Paper
|
| 2 |
+
|
| 3 |
+
These notes address known limitations and design decisions that should be
|
| 4 |
+
documented in the paper methodology section.
|
| 5 |
+
|
| 6 |
+
## Temporal Split Limitation (C3)
|
| 7 |
+
|
| 8 |
+
The temporal split (pre-2020 train / 2020-2021 val / 2022+ test) yields a
|
| 9 |
+
highly imbalanced distribution (train ~99.7%, val ~0.14%, test ~0.14%)
|
| 10 |
+
reflecting the historical concentration of bioactivity data before 2020. We
|
| 11 |
+
retain this split for completeness as a chronological validation, while noting
|
| 12 |
+
that cold-compound and cold-target splits provide more robust generalization
|
| 13 |
+
assessment.
|
| 14 |
+
|
| 15 |
+
## L1 Context Design (C4)
|
| 16 |
+
|
| 17 |
+
L1 provides contextual assay data (activity types and values) alongside the
|
| 18 |
+
question, testing the model's ability to interpret bioactivity data rather
|
| 19 |
+
than factual recall. This is intentional: L4 tests factual recall without
|
| 20 |
+
context, while L1 evaluates data interpretation capability. The context text
|
| 21 |
+
includes activity measurements that inform the correct answer, simulating a
|
| 22 |
+
scientist reviewing assay results.
|
| 23 |
+
|
| 24 |
+
## Contamination Threshold (M12)
|
| 25 |
+
|
| 26 |
+
We flag potential data contamination when pre-2023 accuracy exceeds post-2024
|
| 27 |
+
accuracy by > 15 percentage points. This threshold balances sensitivity to
|
| 28 |
+
temporal bias against random fluctuation in small subsets. Models showing
|
| 29 |
+
higher performance on older data may have encountered these compound-target
|
| 30 |
+
pairs during pre-training.
|
| 31 |
+
|
| 32 |
+
## Scaffold Split Coverage (m1)
|
| 33 |
+
|
| 34 |
+
The scaffold split assigns compounds to folds based on Murcko scaffold
|
| 35 |
+
grouping. The number of unique scaffolds and their pair distribution should
|
| 36 |
+
be reported. If the dataset contains fewer than ~100 unique scaffolds, this
|
| 37 |
+
limitation should be noted as it may reduce the generalization challenge of
|
| 38 |
+
the scaffold split relative to the cold-compound split.
|
experiment_results.md
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NegBioDB Experiment Results
|
| 2 |
+
|
| 3 |
+
*Last updated: 2026-03-24*
|
| 4 |
+
|
| 5 |
+
## Cross-Domain Summary
|
| 6 |
+
|
| 7 |
+
| Domain | Negatives | ML Status | LLM L1 (acc) | LLM L4 (MCC) |
|
| 8 |
+
|--------|-----------|-----------|--------------|--------------|
|
| 9 |
+
| DTI (Drug-Target Interaction) | 30,459,583 | 24/24 complete | Gemini 1.000 (3-shot) | ≤ 0.18 (near random) |
|
| 10 |
+
| CT (Clinical Trial Failure) | 132,925 | 108/108 complete | Gemini 0.68 | Gemini 0.56 |
|
| 11 |
+
| PPI (Protein-Protein Interaction) | 2,229,670 | 54/54 complete | ~1.000 (3-shot artifact) | Llama 0.441 |
|
| 12 |
+
| GE (Gene Essentiality / DepMap) | 28,759,256 | Seed 42 complete | 4/5 models complete | Pending (Llama) |
|
| 13 |
+
|
| 14 |
+
**Key insight:** LLM discrimination ability (L4 MCC) increases with task complexity: DTI (~0.05–0.18) < PPI (~0.33–0.44) < CT (~0.48–0.56). The PPI and CT domains additionally show evidence of training-data contamination.
|
| 15 |
+
|
| 16 |
+
---
|
| 17 |
+
|
| 18 |
+
## DTI Domain (Drug-Target Interaction)
|
| 19 |
+
|
| 20 |
+
**Status as of 2026-03-24:** ML 24/24 complete, LLM 81/81 complete.
|
| 21 |
+
|
| 22 |
+
### Database
|
| 23 |
+
- 30,459,583 negative results, 5 splits (random/cold_compound/cold_target/scaffold/temporal)
|
| 24 |
+
- 1,725,446 export rows (862,723 pos + 862,723 neg)
|
| 25 |
+
- Source tiers: gold=818,611 / silver=198 / bronze=28,845,632 (PubChem dynamic tiers)
|
| 26 |
+
|
| 27 |
+
### ML Results Summary
|
| 28 |
+
- 3 models: DeepDTA, GraphDTA, DrugBAN × 5 splits × 2 negative types (NegBioDB + uniform_random)
|
| 29 |
+
- NegBioDB negatives: Random split near-perfect AUROC (trivially separable)
|
| 30 |
+
- Control negatives (uniform_random): Harder splits show meaningful degradation
|
| 31 |
+
|
| 32 |
+
### LLM Results (81/81 complete)
|
| 33 |
+
| Task | Best Model | Metric | Value |
|
| 34 |
+
|------|-----------|--------|-------|
|
| 35 |
+
| L1 (MCQ) | Gemini | Accuracy (3-shot) | 1.000 |
|
| 36 |
+
| L4 (Discrim) | Best model | MCC | ≤ 0.18 (all near random) |
|
| 37 |
+
|
| 38 |
+
**Key finding:** DTI LLMs show near-random discrimination (L4 MCC ≤ 0.18). The binding/non-binding decision is too nuanced for zero-shot or few-shot LLMs. This is the hardest domain.
|
| 39 |
+
|
| 40 |
+
---
|
| 41 |
+
|
| 42 |
+
## CT Domain (Clinical Trial Failure)
|
| 43 |
+
|
| 44 |
+
**Status as of 2026-03-20:** ML 108/108 complete, LLM 80/80 complete, L3 judge complete.
|
| 45 |
+
|
| 46 |
+
### Database
|
| 47 |
+
- 132,925 failure results from 216,987 trials
|
| 48 |
+
- Tiers: gold 23,570 / silver 28,505 / bronze 60,223 / copper 20,627
|
| 49 |
+
- 8 failure categories (7 with data): safety > efficacy > enrollment > strategic > regulatory > design > other; the eighth category (PK) has zero results
|
| 50 |
+
|
| 51 |
+
### ML Results (CT-M1: Binary Failure Prediction)
|
| 52 |
+
Aggregated over 3 seeds (source: `results/ct_table_m1_aggregated.csv`):
|
| 53 |
+
|
| 54 |
+
| Model | Split | Negatives | AUROC | MCC |
|
| 55 |
+
|-------|-------|-----------|-------|-----|
|
| 56 |
+
| XGBoost | random | NegBioDB | **1.000** | 1.000 |
|
| 57 |
+
| GNN | random | NegBioDB | 1.000 | 1.000 |
|
| 58 |
+
| MLP | random | NegBioDB | 1.000 | 0.992 |
|
| 59 |
+
| XGBoost | random | degree_matched | 0.844 | 0.553 |
|
| 60 |
+
| MLP | random | degree_matched | 0.801 | 0.454 |
|
| 61 |
+
| GNN | random | degree_matched | 0.758 | 0.440 |
|
| 62 |
+
| XGBoost | cold_condition | NegBioDB | 1.000 | 1.000 |
|
| 63 |
+
| XGBoost | cold_drug | NegBioDB | 1.000 | 0.999 |
|
| 64 |
+
|
| 65 |
+
**Key finding:** CT-M1 is trivially separable on NegBioDB negatives (AUROC=1.0 random/cold splits). Control negatives (degree_matched) reveal meaningful discrimination AUROC ~0.76–0.84.
|
| 66 |
+
|
| 67 |
+
### ML Results (CT-M2: 7-way Failure Category)
|
| 68 |
+
Aggregated over 3 seeds (source: `results/ct_table_m2_aggregated.csv`):
|
| 69 |
+
|
| 70 |
+
| Model | Split | Macro-F1 | Weighted-F1 | MCC |
|
| 71 |
+
|-------|-------|----------|-------------|-----|
|
| 72 |
+
| XGBoost | random | **0.510** | 0.751 | 0.637 |
|
| 73 |
+
| XGBoost | degree_balanced | 0.521 | 0.758 | 0.645 |
|
| 74 |
+
| XGBoost | cold_condition | 0.338 | 0.686 | 0.570 |
|
| 75 |
+
| XGBoost | cold_drug | 0.414 | 0.683 | 0.555 |
|
| 76 |
+
| XGBoost | scaffold | 0.193 | 0.567 | 0.374 |
|
| 77 |
+
| XGBoost | temporal | 0.193 | 0.602 | 0.454 |
|
| 78 |
+
| GNN | random | 0.468 | 0.672 | 0.526 |
|
| 79 |
+
| MLP | random | 0.358 | 0.619 | 0.432 |
|
| 80 |
+
|
| 81 |
+
**Key finding:** XGBoost best for M2. Scaffold/temporal splits are hardest (macro-F1 ~0.19). Degree-balanced helps.
|
| 82 |
+
|
| 83 |
+
### LLM Results (80/80 complete)
|
| 84 |
+
| Task | Gemini | GPT-4o-mini | Haiku | Qwen-7B | Llama-8B |
|
| 85 |
+
|------|--------|-------------|-------|---------|---------|
|
| 86 |
+
| L1 (5-way MCQ, acc) | **0.68** | 0.64 | 0.66 | 0.65 | 0.63 |
|
| 87 |
+
| L2 (extraction, field_f1) | 0.75 | 0.73 | 0.48 | **0.81** | 0.77 |
|
| 88 |
+
| L3 (reasoning, /5.0) | ~4.7 | ~4.6 | ~4.4 | ~4.5 | ~4.6 |
|
| 89 |
+
| L4 (discrim, MCC) | **0.56** | 0.49 | 0.50 | 0.48 | 0.50 |
|
| 90 |
+
|
| 91 |
+
**Key finding:** CT LLMs show meaningful discrimination (MCC ~0.5). L3 ceiling effect — all models score 4.4–4.7/5.0 (judge too lenient; scores not discriminative). L2 Qwen/Llama outperform API models on field F1.
|
| 92 |
+
|
| 93 |
+
---
|
| 94 |
+
|
| 95 |
+
## PPI Domain (Protein-Protein Interaction)
|
| 96 |
+
|
| 97 |
+
**Status as of 2026-03-23:** ML 54/54 complete (seeds 42/43/44), LLM 80/80 complete, L3 judged.
|
| 98 |
+
|
| 99 |
+
### Database
|
| 100 |
+
- 2,229,670 negative results in DB (IntAct 779 / HuRI 500K / hu.MAP 1.23M / STRING 500K); 2,220,786 export rows after split filtering
|
| 101 |
+
- 61,728 positive pairs (HuRI Y2H), 18,412 proteins
|
| 102 |
+
|
| 103 |
+
### ML Results (PPI-M1: Binary Non-interaction Prediction)
|
| 104 |
+
Aggregated over 3 seeds (source: `results/ppi/table1_aggregated.md`):
|
| 105 |
+
|
| 106 |
+
| Model | Split | Negatives | AUROC | MCC | LogAUC |
|
| 107 |
+
|-------|-------|-----------|-------|-----|--------|
|
| 108 |
+
| SiameseCNN | random | NegBioDB | 0.963 ± 0.000 | 0.794 ± 0.012 | 0.517 ± 0.018 |
|
| 109 |
+
| PIPR | random | NegBioDB | 0.964 ± 0.001 | 0.812 ± 0.006 | 0.519 ± 0.009 |
|
| 110 |
+
| MLPFeatures | random | NegBioDB | 0.962 ± 0.001 | 0.788 ± 0.003 | 0.567 ± 0.005 |
|
| 111 |
+
| SiameseCNN | cold_protein | NegBioDB | 0.873 ± 0.002 | 0.568 ± 0.019 | 0.314 ± 0.014 |
|
| 112 |
+
| PIPR | cold_protein | NegBioDB | 0.859 ± 0.008 | 0.565 ± 0.019 | 0.288 ± 0.010 |
|
| 113 |
+
| **MLPFeatures** | **cold_protein** | **NegBioDB** | **0.931 ± 0.001** | **0.706 ± 0.005** | **0.476 ± 0.005** |
|
| 114 |
+
| SiameseCNN | cold_both | NegBioDB | 0.585 ± 0.040 | 0.070 ± 0.004 | 0.037 ± 0.010 |
|
| 115 |
+
| PIPR | cold_both | NegBioDB | 0.409 ± 0.077 | −0.018 ± 0.044 | 0.031 ± 0.019 |
|
| 116 |
+
| **MLPFeatures** | **cold_both** | **NegBioDB** | **0.950 ± 0.021** | **0.749 ± 0.043** | **0.595 ± 0.051** |
|
| 117 |
+
| SiameseCNN | random | uniform_random | 0.965 ± 0.001 | 0.806 ± 0.007 | 0.552 ± 0.002 |
|
| 118 |
+
| PIPR | random | uniform_random | 0.966 ± 0.000 | 0.810 ± 0.002 | 0.565 ± 0.005 |
|
| 119 |
+
|
| 120 |
+
**Key findings:**
|
| 121 |
+
- MLPFeatures dominates cold splits: AUROC 0.95 on cold_both (vs PIPR collapse to 0.41)
|
| 122 |
+
- PIPR catastrophic failure on cold_both: AUROC 0.41 (below random!)
|
| 123 |
+
- Control negatives (uniform_random, degree_matched) inflate LogAUC by +0.04–0.05
|
| 124 |
+
|
| 125 |
+
### LLM Results (80/80 complete, post-hoc fixes applied 2026-03-23)
|
| 126 |
+
| Task | Gemini | GPT-4o-mini | Haiku | Qwen-7B | Llama-8B |
|
| 127 |
+
|------|--------|-------------|-------|---------|---------|
|
| 128 |
+
| L1 (4-way MCQ, acc) zero-shot | 0.75 | 0.75 | 0.75 | 0.75 | 0.75 |
|
| 129 |
+
| L1 (4-way MCQ, acc) 3-shot | **1.000** | 0.997 | 0.998 | 0.998 | 0.997 |
|
| 130 |
+
| L2 (method_accuracy) 3-shot | **1.000** | ~0.94 | ~0.08 | ~0.94 | ~0.08 |
|
| 131 |
+
| L3 (reasoning, /5.0) zero-shot | 4.4–4.7 | 4.4–4.7 | 4.3–4.7 | 4.3–4.7 | 4.3–4.7 |
|
| 132 |
+
| L3 (reasoning, /5.0) 3-shot | 3.1–3.7 | 3.1–3.7 | 3.1–3.7 | 3.1–3.7 | 3.1–3.7 |
|
| 133 |
+
| L4 (discrim, MCC) zero-shot | 0.43 | 0.430 | 0.38 | 0.36 | **0.441** |
|
| 134 |
+
|
| 135 |
+
**Key findings:**
|
| 136 |
+
- L1: 3-shot near-perfect is an artifact (in-context examples reveal pattern format)
|
| 137 |
+
- L2: Gemini/GPT/Qwen correctly identify interaction methods, Haiku/Llama fail
|
| 138 |
+
- L3: zero-shot > 3-shot (gold reasoning examples degrade performance); structural reasoning collapses to ~1.2/5 in 3-shot
|
| 139 |
+
- L4: All models show contamination (acc_pre_2015 >> acc_post_2020)
|
| 140 |
+
|
| 141 |
+
---
|
| 142 |
+
|
| 143 |
+
## GE Domain (Gene Essentiality / DepMap)
|
| 144 |
+
|
| 145 |
+
**Status as of 2026-03-24:** ETL+ML complete, LLM 4/5 models done (Llama pending), L3 judged.
|
| 146 |
+
|
| 147 |
+
### Database
|
| 148 |
+
- 28,759,256 negative results (CRISPR 19.7M + RNAi 9.1M)
|
| 149 |
+
- Final tiers: Gold 753,878 / Silver 18,608,686 / Bronze 9,396,692
|
| 150 |
+
- 22,549,910 aggregated pairs (19,554 genes × 2,132 cell lines)
|
| 151 |
+
|
| 152 |
+
### ML Results (Seed 42 complete; seeds 43/44 in progress)
|
| 153 |
+
5 splits: random/cold_gene/cold_cell_line/cold_both/degree_balanced
|
| 154 |
+
Models: XGBoost, MLPFeatures
|
| 155 |
+
*Aggregated results (3 seeds) pending; individual seed 42 results available in `results/ge/`.*
|
| 156 |
+
|
| 157 |
+
### LLM Results (4/5 models complete — Llama pending)
|
| 158 |
+
| Task | Models Done | Key Finding |
|
| 159 |
+
|------|-------------|-------------|
|
| 160 |
+
| L1 (4-way MCQ, 1,200 items) | Haiku, Gemini, GPT, Qwen | Results pending Llama |
|
| 161 |
+
| L2 (field extraction, 500 items) | Haiku, Gemini, GPT, Qwen | Results pending Llama |
|
| 162 |
+
| L3 (reasoning, 200 items, judged) | Haiku, Gemini, GPT, Qwen | zero-shot >> 3-shot (4.5 vs 2.5 overall mean) |
|
| 163 |
+
| L4 (discrimination, 475 items) | Haiku, Gemini, GPT, Qwen | Results pending Llama |
|
| 164 |
+
|
| 165 |
+
**Expected key finding:** GE L4 MCC likely intermediate between PPI and DTI given DepMap is a public widely-studied dataset with high training data overlap.
|
| 166 |
+
|
| 167 |
+
---
|
| 168 |
+
|
| 169 |
+
## Methodology Notes
|
| 170 |
+
|
| 171 |
+
### ML Evaluation Protocol
|
| 172 |
+
- **Metrics:** AUROC (primary), LogAUC (early enrichment), AUPRC, MCC, BEDROC
|
| 173 |
+
- **Seeds:** 3 seeds (42, 43, 44) for statistical robustness (except GE seed 42 only so far)
|
| 174 |
+
- **Negative types:** NegBioDB (structured negatives) vs control negatives (uniform_random, degree_matched)
|
| 175 |
+
- **Splits:** Random → Cold (one entity unseen) → Cold-Both (both entities unseen, hardest)
|
| 176 |
+
|
| 177 |
+
### LLM Evaluation Protocol
|
| 178 |
+
- **Models:** Claude Haiku-4.5, Gemini 2.5-Flash, GPT-4o-mini, Qwen2.5-7B-Instruct, Llama-3.1-8B-Instruct
|
| 179 |
+
- **Configs:** Zero-shot × 1 + 3-shot × 3 fewshot sets = 4 configs per (model × task)
|
| 180 |
+
- **L3 judge:** Gemini 2.5-Flash LLM-as-judge, 4 dimensions × 5-point scale
|
| 181 |
+
- **L4 contamination test:** Older vs newer entity pairs (pre-cutoff vs post-cutoff)
|
migrations/001_initial_schema.sql
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
-- ============================================================
-- NegBioDB Schema v1.0
-- Migration: 001_initial_schema
-- Database: SQLite 3.35+
--
-- Layout:
--   Common layer      : compounds, targets, assays
--   DTI domain layer  : negative_results, dti_context
--   Aggregation layer : compound_target_pairs (ML export)
--   Benchmark layer   : split_definitions, split_assignments
--   Metadata layer    : dataset_versions, schema_migrations
-- ============================================================

PRAGMA journal_mode = WAL;
PRAGMA foreign_keys = ON;
PRAGMA encoding = 'UTF-8';

-- ============================================================
-- COMMON LAYER
-- ============================================================

-- Canonicalized small molecules with precomputed physchem
-- descriptors and screening-liability flags.
CREATE TABLE compounds (
    compound_id INTEGER PRIMARY KEY AUTOINCREMENT,
    canonical_smiles TEXT NOT NULL,
    inchikey TEXT NOT NULL,
    inchikey_connectivity TEXT NOT NULL,   -- first InChIKey block (skeleton-level dedup)
    inchi TEXT,
    pubchem_cid INTEGER,
    chembl_id TEXT,
    bindingdb_id INTEGER,
    molecular_weight REAL,
    logp REAL,
    hbd INTEGER,
    hba INTEGER,
    tpsa REAL,
    rotatable_bonds INTEGER,
    num_heavy_atoms INTEGER,
    qed REAL,
    pains_alert INTEGER DEFAULT 0,         -- 1 if compound matches a PAINS filter
    aggregator_alert INTEGER DEFAULT 0,    -- 1 if flagged as a likely aggregator
    lipinski_violations INTEGER DEFAULT 0,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    updated_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE UNIQUE INDEX idx_compounds_inchikey ON compounds(inchikey);
CREATE INDEX idx_compounds_connectivity ON compounds(inchikey_connectivity);
CREATE INDEX idx_compounds_pubchem ON compounds(pubchem_cid) WHERE pubchem_cid IS NOT NULL;
CREATE INDEX idx_compounds_chembl ON compounds(chembl_id) WHERE chembl_id IS NOT NULL;
CREATE INDEX idx_compounds_smiles ON compounds(canonical_smiles);

-- Protein targets keyed by UniProt accession (human by default).
CREATE TABLE targets (
    target_id INTEGER PRIMARY KEY AUTOINCREMENT,
    uniprot_accession TEXT NOT NULL,
    uniprot_entry_name TEXT,
    amino_acid_sequence TEXT,
    sequence_length INTEGER,
    chembl_target_id TEXT,
    gene_symbol TEXT,
    ncbi_gene_id INTEGER,
    target_family TEXT,
    target_subfamily TEXT,
    dto_class TEXT,
    -- Pharos/IDG target development level
    development_level TEXT CHECK (development_level IN ('Tclin', 'Tchem', 'Tbio', 'Tdark')),
    organism TEXT DEFAULT 'Homo sapiens',
    taxonomy_id INTEGER DEFAULT 9606,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    updated_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE UNIQUE INDEX idx_targets_uniprot ON targets(uniprot_accession);
CREATE INDEX idx_targets_chembl ON targets(chembl_target_id) WHERE chembl_target_id IS NOT NULL;
CREATE INDEX idx_targets_gene ON targets(gene_symbol) WHERE gene_symbol IS NOT NULL;
CREATE INDEX idx_targets_family ON targets(target_family);
CREATE INDEX idx_targets_dev_level ON targets(development_level);

-- Assay records; (source_db, source_assay_id) identifies the upstream record.
CREATE TABLE assays (
    assay_id INTEGER PRIMARY KEY AUTOINCREMENT,
    source_db TEXT NOT NULL CHECK (source_db IN (
        'pubchem', 'chembl', 'bindingdb', 'literature', 'community')),
    source_assay_id TEXT NOT NULL,
    assay_type TEXT,
    assay_format TEXT CHECK (assay_format IN (
        'biochemical', 'cell-based', 'in_vivo', 'unknown')),
    assay_technology TEXT,
    detection_method TEXT,
    screen_type TEXT CHECK (screen_type IN (
        'primary_single_point', 'confirmatory_dose_response',
        'counter_screen', 'orthogonal_assay',
        'literature_assay', 'unknown')),
    z_factor REAL,    -- assay quality statistic
    ssmd REAL,        -- strictly standardized mean difference
    cell_line TEXT,
    description TEXT,
    pubmed_id INTEGER,
    doi TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE UNIQUE INDEX idx_assays_source ON assays(source_db, source_assay_id);
CREATE INDEX idx_assays_format ON assays(assay_format);
CREATE INDEX idx_assays_screen ON assays(screen_type);

-- ============================================================
-- DTI DOMAIN LAYER
-- ============================================================

-- One row per reported negative (non-binding) observation.
CREATE TABLE negative_results (
    result_id INTEGER PRIMARY KEY AUTOINCREMENT,
    compound_id INTEGER NOT NULL REFERENCES compounds(compound_id),
    target_id INTEGER NOT NULL REFERENCES targets(target_id),
    assay_id INTEGER REFERENCES assays(assay_id),
    result_type TEXT NOT NULL CHECK (result_type IN (
        'hard_negative', 'conditional_negative',
        'methodological_negative', 'dose_time_negative',
        'hypothesis_negative')),
    confidence_tier TEXT NOT NULL CHECK (confidence_tier IN (
        'gold', 'silver', 'bronze', 'copper')),
    activity_type TEXT,
    activity_value REAL,
    activity_unit TEXT,
    activity_relation TEXT DEFAULT '=',
    pchembl_value REAL,
    inactivity_threshold REAL,
    inactivity_threshold_unit TEXT DEFAULT 'nM',
    max_concentration_tested REAL,
    num_replicates INTEGER,
    species_tested TEXT DEFAULT 'Homo sapiens',
    source_db TEXT NOT NULL,
    source_record_id TEXT NOT NULL,
    extraction_method TEXT NOT NULL CHECK (extraction_method IN (
        'database_direct', 'text_mining',
        'llm_extracted', 'community_submitted')),
    curator_validated INTEGER DEFAULT 0,
    publication_year INTEGER,
    deposition_date TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    updated_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE INDEX idx_results_compound ON negative_results(compound_id);
CREATE INDEX idx_results_target ON negative_results(target_id);
CREATE INDEX idx_results_pair ON negative_results(compound_id, target_id);
CREATE INDEX idx_results_tier ON negative_results(confidence_tier);
CREATE INDEX idx_results_source ON negative_results(source_db);
CREATE INDEX idx_results_year ON negative_results(publication_year);
CREATE INDEX idx_results_type ON negative_results(result_type);

-- COALESCE handles NULL assay_id: SQLite treats NULL as distinct in UNIQUE indexes,
-- which would allow duplicate rows when assay_id is missing.
CREATE UNIQUE INDEX idx_results_unique_source ON negative_results(
    compound_id, target_id, COALESCE(assay_id, -1), source_db, source_record_id);

-- Optional per-result context (1:1 with negative_results).
CREATE TABLE dti_context (
    result_id INTEGER PRIMARY KEY REFERENCES negative_results(result_id),
    binding_site TEXT CHECK (binding_site IN (
        'orthosteric', 'allosteric', 'unknown')),
    selectivity_panel INTEGER DEFAULT 0,
    counterpart_active INTEGER DEFAULT 0,
    cell_permeability_issue INTEGER DEFAULT 0,
    compound_solubility REAL,
    compound_stability TEXT
);

-- ============================================================
-- AGGREGATION LAYER (for ML export)
-- ============================================================

-- One row per unique (compound, target) pair, summarizing all
-- underlying negative_results rows for export.
CREATE TABLE compound_target_pairs (
    pair_id INTEGER PRIMARY KEY AUTOINCREMENT,
    compound_id INTEGER NOT NULL REFERENCES compounds(compound_id),
    target_id INTEGER NOT NULL REFERENCES targets(target_id),
    num_assays INTEGER NOT NULL,
    num_sources INTEGER NOT NULL,
    best_confidence TEXT NOT NULL,
    best_result_type TEXT,
    earliest_year INTEGER,
    median_pchembl REAL,
    min_activity_value REAL,
    max_activity_value REAL,
    has_conflicting_results INTEGER DEFAULT 0,
    compound_degree INTEGER,   -- number of targets this compound appears with
    target_degree INTEGER,     -- number of compounds this target appears with
    UNIQUE(compound_id, target_id)
);

CREATE INDEX idx_pairs_compound ON compound_target_pairs(compound_id);
CREATE INDEX idx_pairs_target ON compound_target_pairs(target_id);
CREATE INDEX idx_pairs_confidence ON compound_target_pairs(best_confidence);

-- ============================================================
-- BENCHMARK / ML LAYER
-- ============================================================

-- Named, versioned train/val/test split configurations.
CREATE TABLE split_definitions (
    split_id INTEGER PRIMARY KEY AUTOINCREMENT,
    split_name TEXT NOT NULL,
    split_strategy TEXT NOT NULL CHECK (split_strategy IN (
        'random', 'cold_compound', 'cold_target', 'cold_both',
        'temporal', 'scaffold', 'degree_balanced')),
    description TEXT,
    random_seed INTEGER,
    train_ratio REAL DEFAULT 0.7,
    val_ratio REAL DEFAULT 0.1,
    test_ratio REAL DEFAULT 0.2,
    date_created TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    version TEXT DEFAULT '1.0',
    UNIQUE(split_name, version)
);

-- Fold assignment of each pair under each split.
CREATE TABLE split_assignments (
    pair_id INTEGER NOT NULL REFERENCES compound_target_pairs(pair_id),
    split_id INTEGER NOT NULL REFERENCES split_definitions(split_id),
    fold TEXT NOT NULL CHECK (fold IN ('train', 'val', 'test')),
    PRIMARY KEY (pair_id, split_id)
);

CREATE INDEX idx_splits_fold ON split_assignments(split_id, fold);

-- ============================================================
-- METADATA LAYER
-- ============================================================

-- Released dataset snapshots with summary counts and checksum.
CREATE TABLE dataset_versions (
    version_id INTEGER PRIMARY KEY AUTOINCREMENT,
    version_tag TEXT NOT NULL UNIQUE,
    description TEXT,
    num_compounds INTEGER,
    num_targets INTEGER,
    num_pairs INTEGER,
    num_results INTEGER,
    schema_version TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    checksum_sha256 TEXT
);

-- Applied-migration ledger (up/down SQL kept for audit).
CREATE TABLE schema_migrations (
    migration_id INTEGER PRIMARY KEY AUTOINCREMENT,
    version TEXT NOT NULL,
    description TEXT,
    applied_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    sql_up TEXT,
    sql_down TEXT
);

-- Record this migration
INSERT INTO schema_migrations (version, description, sql_up)
VALUES ('001', 'Initial NegBioDB schema', 'Full DDL');
|
migrations/002_target_variants.sql
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
-- ============================================================
-- NegBioDB Schema v1.1
-- Migration: 002_target_variants
-- Purpose:
--   1) Separate target variants (point mutations, modified forms)
--      from canonical UniProt targets
--   2) Link negative results to optional variant context
-- ============================================================

-- One row per distinct variant of a canonical target, as reported
-- by a given source record.
CREATE TABLE target_variants (
    variant_id INTEGER PRIMARY KEY AUTOINCREMENT,
    target_id INTEGER NOT NULL REFERENCES targets(target_id),
    variant_label TEXT NOT NULL,   -- e.g., E255K, T315I
    raw_gene_name TEXT,            -- e.g., ABL1(E255K)-phosphorylated
    source_db TEXT NOT NULL CHECK (source_db IN (
        'davis', 'pubchem', 'chembl', 'bindingdb',
        'literature', 'community')),
    -- '' (not NULL) so the UNIQUE constraint below still deduplicates
    -- rows that lack a source record id.
    source_record_id TEXT NOT NULL DEFAULT '',
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    UNIQUE(target_id, variant_label, source_db, source_record_id)
);

CREATE INDEX idx_target_variants_target ON target_variants(target_id);
CREATE INDEX idx_target_variants_label ON target_variants(variant_label);

-- NULLable FK: NULL default satisfies SQLite's ALTER TABLE ADD COLUMN
-- restriction on foreign-key columns.
ALTER TABLE negative_results ADD COLUMN variant_id INTEGER REFERENCES target_variants(variant_id);
CREATE INDEX idx_results_variant ON negative_results(variant_id);

-- Record this migration
INSERT INTO schema_migrations (version, description, sql_up)
VALUES ('002', 'Add target_variants table and negative_results.variant_id', 'Full DDL');
|
migrations_ct/001_ct_initial_schema.sql
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
-- NegBioDB Clinical Trial Failure Domain — Initial Schema
-- Migration 001: Core tables for clinical trial failure tracking
--
-- Reuses Common Layer patterns from the DTI domain:
--   - schema_migrations for version tracking
--   - dataset_versions for provenance
--   - Confidence tiers (gold/silver/bronze/copper)
--   - WAL journal mode + FK enforcement (set by ct_db.py)

-- ============================================================
-- Common Layer tables (same as DTI)
-- ============================================================

CREATE TABLE IF NOT EXISTS schema_migrations (
    version TEXT PRIMARY KEY,
    applied_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE TABLE IF NOT EXISTS dataset_versions (
    dataset_id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL,
    version TEXT NOT NULL,
    source_url TEXT,
    download_date TEXT,
    file_hash TEXT,
    row_count INTEGER,
    notes TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

-- ============================================================
-- Domain-specific tables: Clinical Trial Failure
-- ============================================================

-- Interventions (drugs, biologics, devices, etc.)
CREATE TABLE interventions (
    intervention_id INTEGER PRIMARY KEY AUTOINCREMENT,
    intervention_type TEXT NOT NULL CHECK (intervention_type IN (
        'drug', 'biologic', 'device', 'procedure', 'behavioral',
        'dietary', 'genetic', 'radiation', 'combination', 'other')),
    intervention_name TEXT NOT NULL,
    canonical_name TEXT,
    drugbank_id TEXT,
    pubchem_cid INTEGER,
    chembl_id TEXT,
    mesh_id TEXT,
    atc_code TEXT,
    mechanism_of_action TEXT,
    canonical_smiles TEXT,      -- small molecules
    canonical_sequence TEXT,    -- biologics
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    updated_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

-- Conditions (diseases/indications)
CREATE TABLE conditions (
    condition_id INTEGER PRIMARY KEY AUTOINCREMENT,
    condition_name TEXT NOT NULL,
    canonical_name TEXT,
    mesh_id TEXT,
    icd10_code TEXT,
    icd11_code TEXT,
    do_id TEXT,             -- Disease Ontology id
    therapeutic_area TEXT,
    condition_class TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    updated_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

-- Molecular targets (bridge to DTI domain)
CREATE TABLE intervention_targets (
    intervention_id INTEGER NOT NULL REFERENCES interventions(intervention_id),
    uniprot_accession TEXT NOT NULL,
    gene_symbol TEXT,
    target_role TEXT CHECK (target_role IN (
        'primary', 'secondary', 'off_target')),
    action_type TEXT,
    source TEXT NOT NULL,
    PRIMARY KEY (intervention_id, uniprot_accession)
);

-- Clinical trials; (source_db, source_trial_id) identifies the
-- upstream registry record.
CREATE TABLE clinical_trials (
    trial_id INTEGER PRIMARY KEY AUTOINCREMENT,
    source_db TEXT NOT NULL CHECK (source_db IN (
        'clinicaltrials_gov', 'eu_ctr', 'who_ictrp', 'literature')),
    source_trial_id TEXT NOT NULL,
    overall_status TEXT NOT NULL,
    trial_phase TEXT CHECK (trial_phase IN (
        'early_phase_1', 'phase_1', 'phase_1_2', 'phase_2',
        'phase_2_3', 'phase_3', 'phase_4', 'not_applicable')),
    study_type TEXT,
    study_design TEXT,
    blinding TEXT,
    randomized INTEGER DEFAULT 0,
    enrollment_target INTEGER,
    enrollment_actual INTEGER,
    primary_endpoint TEXT,
    primary_endpoint_type TEXT,
    control_type TEXT,
    sponsor_type TEXT CHECK (sponsor_type IN (
        'industry', 'academic', 'government', 'other')),
    sponsor_name TEXT,
    start_date TEXT,
    primary_completion_date TEXT,
    completion_date TEXT,
    results_posted_date TEXT,
    why_stopped TEXT,           -- registry free-text stop reason
    has_results INTEGER DEFAULT 0,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    UNIQUE(source_db, source_trial_id)
);
|
| 113 |
+
|
| 114 |
+
-- Junction: trials <-> interventions (many-to-many)
|
| 115 |
+
CREATE TABLE trial_interventions (
|
| 116 |
+
trial_id INTEGER NOT NULL REFERENCES clinical_trials(trial_id),
|
| 117 |
+
intervention_id INTEGER NOT NULL REFERENCES interventions(intervention_id),
|
| 118 |
+
arm_group TEXT,
|
| 119 |
+
arm_role TEXT CHECK (arm_role IN (
|
| 120 |
+
'experimental', 'active_comparator', 'placebo_comparator', 'no_intervention')),
|
| 121 |
+
dose_regimen TEXT,
|
| 122 |
+
PRIMARY KEY (trial_id, intervention_id)
|
| 123 |
+
);
|
| 124 |
+
|
| 125 |
+
-- Junction: trials <-> conditions (many-to-many)
|
| 126 |
+
CREATE TABLE trial_conditions (
|
| 127 |
+
trial_id INTEGER NOT NULL REFERENCES clinical_trials(trial_id),
|
| 128 |
+
condition_id INTEGER NOT NULL REFERENCES conditions(condition_id),
|
| 129 |
+
PRIMARY KEY (trial_id, condition_id)
|
| 130 |
+
);
|
| 131 |
+
|
| 132 |
+
-- Junction: trials <-> publications
|
| 133 |
+
CREATE TABLE trial_publications (
|
| 134 |
+
trial_id INTEGER NOT NULL REFERENCES clinical_trials(trial_id),
|
| 135 |
+
pubmed_id INTEGER NOT NULL,
|
| 136 |
+
pub_type TEXT,
|
| 137 |
+
PRIMARY KEY (trial_id, pubmed_id)
|
| 138 |
+
);
|
| 139 |
+
|
| 140 |
+
-- Combination therapy decomposition
|
| 141 |
+
CREATE TABLE combination_components (
|
| 142 |
+
combination_id INTEGER NOT NULL REFERENCES interventions(intervention_id),
|
| 143 |
+
component_id INTEGER NOT NULL REFERENCES interventions(intervention_id),
|
| 144 |
+
role TEXT CHECK (role IN ('experimental', 'backbone', 'comparator')),
|
| 145 |
+
PRIMARY KEY (combination_id, component_id)
|
| 146 |
+
);
|
| 147 |
+
|
| 148 |
+
-- Trial failure results (core fact table)
|
| 149 |
+
CREATE TABLE trial_failure_results (
|
| 150 |
+
result_id INTEGER PRIMARY KEY AUTOINCREMENT,
|
| 151 |
+
intervention_id INTEGER NOT NULL REFERENCES interventions(intervention_id),
|
| 152 |
+
condition_id INTEGER NOT NULL REFERENCES conditions(condition_id),
|
| 153 |
+
trial_id INTEGER REFERENCES clinical_trials(trial_id),
|
| 154 |
+
|
| 155 |
+
-- Hierarchical failure classification
|
| 156 |
+
failure_category TEXT NOT NULL CHECK (failure_category IN (
|
| 157 |
+
'efficacy', 'safety', 'pharmacokinetic', 'enrollment',
|
| 158 |
+
'strategic', 'regulatory', 'design', 'other')),
|
| 159 |
+
failure_subcategory TEXT,
|
| 160 |
+
failure_detail TEXT,
|
| 161 |
+
|
| 162 |
+
-- Confidence tier (reused from DTI)
|
| 163 |
+
confidence_tier TEXT NOT NULL CHECK (confidence_tier IN (
|
| 164 |
+
'gold', 'silver', 'bronze', 'copper')),
|
| 165 |
+
|
| 166 |
+
-- Arm-level context (multi-arm trials)
|
| 167 |
+
arm_description TEXT,
|
| 168 |
+
arm_type TEXT CHECK (arm_type IN (
|
| 169 |
+
'experimental', 'active_comparator', 'placebo_comparator', 'overall')),
|
| 170 |
+
|
| 171 |
+
-- Quantitative outcome data
|
| 172 |
+
primary_endpoint_met INTEGER,
|
| 173 |
+
p_value_primary REAL,
|
| 174 |
+
effect_size REAL,
|
| 175 |
+
effect_size_type TEXT,
|
| 176 |
+
ci_lower REAL,
|
| 177 |
+
ci_upper REAL,
|
| 178 |
+
sample_size_treatment INTEGER,
|
| 179 |
+
sample_size_control INTEGER,
|
| 180 |
+
|
| 181 |
+
-- Safety signals
|
| 182 |
+
serious_adverse_events INTEGER,
|
| 183 |
+
deaths_treatment INTEGER,
|
| 184 |
+
deaths_control INTEGER,
|
| 185 |
+
dsmb_stopped INTEGER DEFAULT 0,
|
| 186 |
+
|
| 187 |
+
-- Phase context
|
| 188 |
+
highest_phase_reached TEXT,
|
| 189 |
+
prior_phase_succeeded INTEGER DEFAULT 0,
|
| 190 |
+
|
| 191 |
+
-- Provenance
|
| 192 |
+
source_db TEXT NOT NULL,
|
| 193 |
+
source_record_id TEXT NOT NULL,
|
| 194 |
+
extraction_method TEXT NOT NULL CHECK (extraction_method IN (
|
| 195 |
+
'database_direct', 'nlp_classified', 'text_mining',
|
| 196 |
+
'llm_extracted', 'community_submitted')),
|
| 197 |
+
curator_validated INTEGER DEFAULT 0,
|
| 198 |
+
publication_year INTEGER,
|
| 199 |
+
|
| 200 |
+
created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
|
| 201 |
+
updated_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
|
| 202 |
+
);
|
| 203 |
+
|
| 204 |
+
-- Aggregation table
|
| 205 |
+
CREATE TABLE intervention_condition_pairs (
|
| 206 |
+
pair_id INTEGER PRIMARY KEY AUTOINCREMENT,
|
| 207 |
+
intervention_id INTEGER NOT NULL REFERENCES interventions(intervention_id),
|
| 208 |
+
condition_id INTEGER NOT NULL REFERENCES conditions(condition_id),
|
| 209 |
+
num_trials INTEGER NOT NULL,
|
| 210 |
+
num_sources INTEGER NOT NULL,
|
| 211 |
+
best_confidence TEXT NOT NULL,
|
| 212 |
+
primary_failure_category TEXT,
|
| 213 |
+
earliest_year INTEGER,
|
| 214 |
+
highest_phase_reached TEXT,
|
| 215 |
+
has_any_approval INTEGER DEFAULT 0,
|
| 216 |
+
intervention_degree INTEGER,
|
| 217 |
+
condition_degree INTEGER,
|
| 218 |
+
UNIQUE(intervention_id, condition_id)
|
| 219 |
+
);
|
| 220 |
+
|
| 221 |
+
-- Trial failure context (extended metadata)
|
| 222 |
+
CREATE TABLE trial_failure_context (
|
| 223 |
+
result_id INTEGER PRIMARY KEY REFERENCES trial_failure_results(result_id),
|
| 224 |
+
patient_population TEXT,
|
| 225 |
+
biomarker_stratified INTEGER DEFAULT 0,
|
| 226 |
+
companion_diagnostic TEXT,
|
| 227 |
+
prior_treatment_lines INTEGER,
|
| 228 |
+
comparator_drug TEXT,
|
| 229 |
+
geographic_regions TEXT,
|
| 230 |
+
regulatory_pathway TEXT,
|
| 231 |
+
genetic_evidence INTEGER DEFAULT 0,
|
| 232 |
+
class_effect_known INTEGER DEFAULT 0,
|
| 233 |
+
has_negbiodb_dti_data INTEGER DEFAULT 0
|
| 234 |
+
);
|
| 235 |
+
|
| 236 |
+
-- ============================================================
|
| 237 |
+
-- Indices for performance
|
| 238 |
+
-- ============================================================
|
| 239 |
+
|
| 240 |
+
CREATE INDEX idx_tfr_failure_category ON trial_failure_results(failure_category);
|
| 241 |
+
CREATE INDEX idx_tfr_confidence ON trial_failure_results(confidence_tier);
|
| 242 |
+
CREATE INDEX idx_tfr_intervention ON trial_failure_results(intervention_id);
|
| 243 |
+
CREATE INDEX idx_tfr_condition ON trial_failure_results(condition_id);
|
| 244 |
+
CREATE INDEX idx_tfr_trial ON trial_failure_results(trial_id);
|
| 245 |
+
CREATE INDEX idx_ct_status ON clinical_trials(overall_status);
|
| 246 |
+
CREATE INDEX idx_ct_phase ON clinical_trials(trial_phase);
|
| 247 |
+
CREATE INDEX idx_ct_completion ON clinical_trials(primary_completion_date);
|
| 248 |
+
CREATE INDEX idx_ct_source ON clinical_trials(source_db);
|
| 249 |
+
CREATE INDEX idx_interv_chembl ON interventions(chembl_id);
|
| 250 |
+
CREATE INDEX idx_interv_drugbank ON interventions(drugbank_id);
|
| 251 |
+
CREATE INDEX idx_interv_name ON interventions(intervention_name);
|
| 252 |
+
CREATE INDEX idx_cond_mesh ON conditions(mesh_id);
|
| 253 |
+
CREATE INDEX idx_cond_icd10 ON conditions(icd10_code);
|
| 254 |
+
CREATE INDEX idx_icp_intervention ON intervention_condition_pairs(intervention_id);
|
| 255 |
+
CREATE INDEX idx_icp_condition ON intervention_condition_pairs(condition_id);
|
| 256 |
+
|
| 257 |
+
-- Record migration
|
| 258 |
+
INSERT INTO schema_migrations (version) VALUES ('001');
|
migrations_ct/002_schema_fixes.sql
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- NegBioDB Clinical Trial Failure Domain — Schema Fixes
-- Migration 002: Expert review fixes (6 issues)
--
-- Issues addressed:
-- 0.1 UNIQUE constraint on trial_failure_results (dedup protection)
-- 0.2 highest_phase_reached ordering (fixed in ct_db.py, not SQL)
-- 0.3 inchikey + inchikey_connectivity on interventions (DTI bridge)
-- 0.4 molecular_type on interventions (ML featurization)
-- 0.5 result_interpretation on trial_failure_results (neg vs inconclusive)
-- 0.6 termination_type on clinical_trials (admin vs clinical failure)

-- 0.1: Dedup protection — prevent same failure record from same source.
-- Expression index: COALESCE maps a NULL trial_id to -1 so that records with
-- no linked trial still participate in uniqueness (SQLite otherwise treats
-- NULLs as distinct in UNIQUE indexes).
CREATE UNIQUE INDEX IF NOT EXISTS idx_tfr_unique_source
ON trial_failure_results(
    intervention_id, condition_id,
    COALESCE(trial_id, -1),
    source_db, source_record_id);

-- 0.3: InChIKey columns for DTI-CT cross-domain bridge
ALTER TABLE interventions ADD COLUMN inchikey TEXT;
ALTER TABLE interventions ADD COLUMN inchikey_connectivity TEXT;

-- 0.4: Molecular type for ML featurization (biologics need different features)
ALTER TABLE interventions ADD COLUMN molecular_type TEXT CHECK(molecular_type IN (
    'small_molecule', 'monoclonal_antibody', 'antibody_drug_conjugate',
    'peptide', 'oligonucleotide', 'cell_therapy', 'gene_therapy',
    'other_biologic', 'unknown'));

-- 0.5: Result interpretation (definitive negative vs inconclusive)
ALTER TABLE trial_failure_results ADD COLUMN result_interpretation TEXT CHECK(
    result_interpretation IN (
        'definitive_negative', 'inconclusive_underpowered',
        'mixed_endpoints', 'futility_stopped',
        'safety_stopped', 'administrative'));

-- 0.6: Termination type (clinical failure vs administrative)
ALTER TABLE clinical_trials ADD COLUMN termination_type TEXT CHECK(
    termination_type IN (
        'clinical_failure', 'administrative',
        'external_event', 'unknown'));

-- Index on new columns used in queries
-- NOTE(review): idx_interv_inchikey covers inchikey_connectivity (not inchikey);
-- presumably the connectivity key is the join key used for the DTI bridge —
-- confirm, and consider renaming the index for clarity.
CREATE INDEX IF NOT EXISTS idx_interv_inchikey ON interventions(inchikey_connectivity);
CREATE INDEX IF NOT EXISTS idx_interv_moltype ON interventions(molecular_type);
CREATE INDEX IF NOT EXISTS idx_ct_termtype ON clinical_trials(termination_type);

-- Record migration
INSERT INTO schema_migrations (version) VALUES ('002');
|
migrations_depmap/001_ge_initial_schema.sql
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- NegBioDB Gene Essentiality (GE) Domain — Initial Schema
-- Migration 001: Core tables for DepMap CRISPR/RNAi gene essentiality negatives
--
-- Design decisions:
-- - Asymmetric pairs: gene + cell_line (not symmetric like PPI)
-- - Separate genes/cell_lines tables (separate DB from DTI/CT/PPI)
-- - Confidence tiers: gold/silver/bronze (same framework as DTI/CT/PPI)
-- - PRISM bridge tables: cross-domain link to DTI via InChIKey/ChEMBL
-- - Dedup: UNIQUE on (gene_id, cell_line_id, screen_id, source_db)
-- - Reference flags on genes table for common essential / nonessential sets

-- ============================================================
-- Common Layer tables (same as DTI/CT/PPI)
-- ============================================================

CREATE TABLE IF NOT EXISTS schema_migrations (
    version TEXT PRIMARY KEY,
    applied_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

-- Provenance for every ingested source file (version, hash, row count).
CREATE TABLE IF NOT EXISTS dataset_versions (
    dataset_id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL,
    version TEXT NOT NULL,
    source_url TEXT,
    download_date TEXT,
    file_hash TEXT,
    row_count INTEGER,
    notes TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

-- ============================================================
-- Domain-specific tables: Gene Essentiality
-- ============================================================

-- Genes table
CREATE TABLE genes (
    gene_id INTEGER PRIMARY KEY AUTOINCREMENT,
    entrez_id INTEGER UNIQUE,
    gene_symbol TEXT NOT NULL,
    ensembl_id TEXT,
    description TEXT,
    is_common_essential INTEGER DEFAULT 0,        -- 0/1: DepMap common-essential set
    is_reference_nonessential INTEGER DEFAULT 0,  -- 0/1: reference nonessential set
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    updated_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE INDEX idx_genes_symbol ON genes(gene_symbol);
-- No explicit index on entrez_id: the column-level UNIQUE constraint above
-- already creates an implicit unique index in SQLite, so the former partial
-- index (idx_genes_entrez) was redundant and only added write overhead.

-- Cell lines table (DepMap model metadata)
CREATE TABLE cell_lines (
    cell_line_id INTEGER PRIMARY KEY AUTOINCREMENT,
    model_id TEXT NOT NULL UNIQUE,     -- DepMap model accession (natural key)
    ccle_name TEXT,
    stripped_name TEXT,
    lineage TEXT,
    primary_disease TEXT,
    subtype TEXT,
    sex TEXT,
    primary_or_metastasis TEXT,
    sample_collection_site TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    updated_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE INDEX idx_cell_lines_ccle ON cell_lines(ccle_name) WHERE ccle_name IS NOT NULL;
CREATE INDEX idx_cell_lines_stripped ON cell_lines(stripped_name) WHERE stripped_name IS NOT NULL;
CREATE INDEX idx_cell_lines_lineage ON cell_lines(lineage) WHERE lineage IS NOT NULL;

-- Screen configurations
-- One row per (source, release, screen technology) combination.
CREATE TABLE ge_screens (
    screen_id INTEGER PRIMARY KEY AUTOINCREMENT,
    source_db TEXT NOT NULL CHECK (source_db IN (
        'depmap', 'project_score', 'demeter2')),
    depmap_release TEXT NOT NULL,
    screen_type TEXT NOT NULL CHECK (screen_type IN ('crispr', 'rnai')),
    library TEXT,
    algorithm TEXT,
    notes TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE UNIQUE INDEX idx_ge_screens_source
ON ge_screens(source_db, depmap_release, screen_type);

-- Core fact table: GE negative results (non-essential gene-cell_line pairs)
-- screen_id is nullable (e.g. reference-set evidence not tied to one screen).
CREATE TABLE ge_negative_results (
    result_id INTEGER PRIMARY KEY AUTOINCREMENT,
    gene_id INTEGER NOT NULL REFERENCES genes(gene_id),
    cell_line_id INTEGER NOT NULL REFERENCES cell_lines(cell_line_id),
    screen_id INTEGER REFERENCES ge_screens(screen_id),

    gene_effect_score REAL,
    dependency_probability REAL,

    evidence_type TEXT NOT NULL CHECK (evidence_type IN (
        'crispr_nonessential',
        'rnai_nonessential',
        'multi_screen_concordant',
        'reference_nonessential',
        'context_nonessential')),

    confidence_tier TEXT NOT NULL CHECK (confidence_tier IN (
        'gold', 'silver', 'bronze')),

    source_db TEXT NOT NULL,
    source_record_id TEXT NOT NULL,
    extraction_method TEXT NOT NULL CHECK (extraction_method IN (
        'score_threshold', 'reference_set', 'multi_source_concordance')),

    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    updated_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE INDEX idx_ge_nr_gene ON ge_negative_results(gene_id);
CREATE INDEX idx_ge_nr_cell_line ON ge_negative_results(cell_line_id);
CREATE INDEX idx_ge_nr_pair ON ge_negative_results(gene_id, cell_line_id);
CREATE INDEX idx_ge_nr_tier ON ge_negative_results(confidence_tier);
CREATE INDEX idx_ge_nr_source ON ge_negative_results(source_db);

-- Dedup: COALESCE maps NULL screen_id to -1 so screen-less records still
-- participate in uniqueness. Unlike the CT/PPI dedup indexes, this key
-- deliberately omits source_record_id (one record per pair/screen/source;
-- see the design-decision header above).
CREATE UNIQUE INDEX idx_ge_nr_unique_source ON ge_negative_results(
    gene_id, cell_line_id,
    COALESCE(screen_id, -1),
    source_db);

-- ============================================================
-- Aggregation table
-- ============================================================

-- One row per distinct (gene, cell line) pair, summarizing fact rows.
CREATE TABLE gene_cell_pairs (
    pair_id INTEGER PRIMARY KEY AUTOINCREMENT,
    gene_id INTEGER NOT NULL REFERENCES genes(gene_id),
    cell_line_id INTEGER NOT NULL REFERENCES cell_lines(cell_line_id),
    num_screens INTEGER NOT NULL,
    num_sources INTEGER NOT NULL,
    best_confidence TEXT NOT NULL,
    best_evidence_type TEXT,
    min_gene_effect REAL,
    max_gene_effect REAL,
    mean_gene_effect REAL,
    gene_degree INTEGER,          -- pair counts for degree-aware splits
    cell_line_degree INTEGER,
    UNIQUE(gene_id, cell_line_id)
);

CREATE INDEX idx_gcp_gene ON gene_cell_pairs(gene_id);
CREATE INDEX idx_gcp_cell_line ON gene_cell_pairs(cell_line_id);
CREATE INDEX idx_gcp_confidence ON gene_cell_pairs(best_confidence);

-- ============================================================
-- Benchmark split tables
-- ============================================================

CREATE TABLE ge_split_definitions (
    split_id INTEGER PRIMARY KEY AUTOINCREMENT,
    split_name TEXT NOT NULL,
    split_strategy TEXT NOT NULL CHECK (split_strategy IN (
        'random', 'cold_gene', 'cold_cell_line',
        'cold_both', 'degree_balanced')),
    description TEXT,
    random_seed INTEGER,
    train_ratio REAL DEFAULT 0.7,
    val_ratio REAL DEFAULT 0.1,
    test_ratio REAL DEFAULT 0.2,
    date_created TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    version TEXT DEFAULT '1.0',
    UNIQUE(split_name, version)
);

CREATE TABLE ge_split_assignments (
    pair_id INTEGER NOT NULL REFERENCES gene_cell_pairs(pair_id),
    split_id INTEGER NOT NULL REFERENCES ge_split_definitions(split_id),
    fold TEXT NOT NULL CHECK (fold IN ('train', 'val', 'test')),
    PRIMARY KEY (pair_id, split_id)
);

CREATE INDEX idx_ge_splits_fold ON ge_split_assignments(split_id, fold);

-- ============================================================
-- PRISM drug sensitivity bridge tables
-- ============================================================

-- Compounds screened in PRISM; InChIKey/ChEMBL columns bridge to DTI domain.
CREATE TABLE prism_compounds (
    compound_id INTEGER PRIMARY KEY AUTOINCREMENT,
    broad_id TEXT UNIQUE,
    name TEXT,
    smiles TEXT,
    inchikey TEXT,
    chembl_id TEXT,
    pubchem_cid INTEGER,
    mechanism_of_action TEXT,
    target_name TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE INDEX idx_prism_inchikey ON prism_compounds(inchikey)
    WHERE inchikey IS NOT NULL;
CREATE INDEX idx_prism_chembl ON prism_compounds(chembl_id)
    WHERE chembl_id IS NOT NULL;

-- Per-(compound, cell line) viability measurements.
CREATE TABLE prism_sensitivity (
    sensitivity_id INTEGER PRIMARY KEY AUTOINCREMENT,
    compound_id INTEGER NOT NULL REFERENCES prism_compounds(compound_id),
    cell_line_id INTEGER NOT NULL REFERENCES cell_lines(cell_line_id),
    screen_type TEXT CHECK (screen_type IN ('primary', 'secondary')),
    log_fold_change REAL,
    auc REAL,
    ic50 REAL,
    ec50 REAL,
    depmap_release TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE INDEX idx_prism_sens_compound ON prism_sensitivity(compound_id);
CREATE INDEX idx_prism_sens_cell_line ON prism_sensitivity(cell_line_id);

-- Record migration
INSERT INTO schema_migrations (version) VALUES ('001');
|
migrations_ppi/001_ppi_initial_schema.sql
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- NegBioDB PPI Domain — Initial Schema
-- Migration 001: Core tables for protein-protein interaction negatives
--
-- Design decisions:
-- - Symmetric pairs: CHECK (protein1_id < protein2_id) canonical ordering
-- - Separate proteins table (not reusing DTI targets — separate DB)
-- - Confidence tiers: gold/silver/bronze/copper (same as DTI/CT)
-- - Dedup: COALESCE(experiment_id, -1) pattern (same as DTI/CT)

-- ============================================================
-- Common Layer tables (same as DTI/CT)
-- ============================================================

CREATE TABLE IF NOT EXISTS schema_migrations (
    version TEXT PRIMARY KEY,
    applied_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

-- Provenance for every ingested source file (version, hash, row count).
CREATE TABLE IF NOT EXISTS dataset_versions (
    dataset_id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL,
    version TEXT NOT NULL,
    source_url TEXT,
    download_date TEXT,
    file_hash TEXT,
    row_count INTEGER,
    notes TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

-- ============================================================
-- Domain-specific tables: PPI Negatives
-- ============================================================

-- Proteins table (human by default: organism/taxonomy defaults below)
CREATE TABLE proteins (
    protein_id INTEGER PRIMARY KEY AUTOINCREMENT,
    uniprot_accession TEXT NOT NULL,   -- natural key (unique index below)
    uniprot_entry_name TEXT,
    gene_symbol TEXT,
    amino_acid_sequence TEXT,
    sequence_length INTEGER,
    organism TEXT DEFAULT 'Homo sapiens',
    taxonomy_id INTEGER DEFAULT 9606,
    subcellular_location TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    updated_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE UNIQUE INDEX idx_proteins_uniprot ON proteins(uniprot_accession);
CREATE INDEX idx_proteins_gene ON proteins(gene_symbol)
    WHERE gene_symbol IS NOT NULL;

-- PPI experiments / evidence sources
-- One row per source experiment; (source_db, source_experiment_id) is the
-- natural key (unique index below).
CREATE TABLE ppi_experiments (
    experiment_id INTEGER PRIMARY KEY AUTOINCREMENT,
    source_db TEXT NOT NULL CHECK (source_db IN (
        'huri', 'intact', 'humap', 'string', 'biogrid', 'pdb_derived', 'literature')),
    source_experiment_id TEXT NOT NULL,
    experiment_type TEXT,
    detection_method TEXT,
    detection_method_id TEXT,          -- presumably an ontology term id (e.g. PSI-MI) — confirm
    pubmed_id INTEGER,
    doi TEXT,
    description TEXT,
    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

CREATE UNIQUE INDEX idx_ppi_exp_source
ON ppi_experiments(source_db, source_experiment_id);

-- Core fact table: PPI negative results
-- Pairs are stored once in canonical order (protein1_id < protein2_id,
-- enforced by the table CHECK); experiment_id is nullable.
CREATE TABLE ppi_negative_results (
    result_id INTEGER PRIMARY KEY AUTOINCREMENT,
    protein1_id INTEGER NOT NULL REFERENCES proteins(protein_id),
    protein2_id INTEGER NOT NULL REFERENCES proteins(protein_id),
    experiment_id INTEGER REFERENCES ppi_experiments(experiment_id),

    evidence_type TEXT NOT NULL CHECK (evidence_type IN (
        'experimental_non_interaction',
        'ml_predicted_negative',
        'low_score_negative',
        'compartment_separated',
        'literature_reported')),

    confidence_tier TEXT NOT NULL CHECK (confidence_tier IN (
        'gold', 'silver', 'bronze', 'copper')),

    interaction_score REAL,
    score_type TEXT,                   -- which scoring scheme interaction_score uses
    num_evidence_types INTEGER,

    detection_method TEXT,
    detection_method_id TEXT,
    organism_tested TEXT DEFAULT 'Homo sapiens',

    -- Provenance
    source_db TEXT NOT NULL,
    source_record_id TEXT NOT NULL,
    extraction_method TEXT NOT NULL CHECK (extraction_method IN (
        'database_direct', 'score_threshold',
        'ml_classifier', 'text_mining',
        'community_submitted')),
    curator_validated INTEGER DEFAULT 0,   -- 0/1 flag
    publication_year INTEGER,

    created_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    updated_at TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),

    CHECK (protein1_id < protein2_id)
);

-- NOTE(review): idx_ppi_nr_protein1 and idx_ppi_nr_pair are left-prefixes of
-- idx_ppi_nr_unique_source and therefore likely redundant for lookups —
-- confirm query plans before dropping.
CREATE INDEX idx_ppi_nr_protein1 ON ppi_negative_results(protein1_id);
CREATE INDEX idx_ppi_nr_protein2 ON ppi_negative_results(protein2_id);
CREATE INDEX idx_ppi_nr_pair ON ppi_negative_results(protein1_id, protein2_id);
CREATE INDEX idx_ppi_nr_tier ON ppi_negative_results(confidence_tier);
CREATE INDEX idx_ppi_nr_source ON ppi_negative_results(source_db);

-- Dedup: COALESCE maps NULL experiment_id to -1 so experiment-less records
-- still participate in uniqueness (same pattern as DTI/CT).
CREATE UNIQUE INDEX idx_ppi_nr_unique_source ON ppi_negative_results(
    protein1_id, protein2_id,
    COALESCE(experiment_id, -1),
    source_db, source_record_id);

-- ============================================================
-- Aggregation table
-- ============================================================

-- One row per canonical protein pair, summarizing all fact rows for it.
CREATE TABLE protein_protein_pairs (
    pair_id INTEGER PRIMARY KEY AUTOINCREMENT,
    protein1_id INTEGER NOT NULL REFERENCES proteins(protein_id),
    protein2_id INTEGER NOT NULL REFERENCES proteins(protein_id),
    num_experiments INTEGER NOT NULL,
    num_sources INTEGER NOT NULL,
    best_confidence TEXT NOT NULL,
    best_evidence_type TEXT,
    earliest_year INTEGER,
    min_interaction_score REAL,
    max_interaction_score REAL,
    protein1_degree INTEGER,           -- pair counts for degree-aware splits
    protein2_degree INTEGER,
    UNIQUE(protein1_id, protein2_id),
    CHECK (protein1_id < protein2_id)
);

CREATE INDEX idx_ppp_protein1 ON protein_protein_pairs(protein1_id);
CREATE INDEX idx_ppp_protein2 ON protein_protein_pairs(protein2_id);
CREATE INDEX idx_ppp_confidence ON protein_protein_pairs(best_confidence);

-- ============================================================
-- Benchmark split tables
-- ============================================================

CREATE TABLE ppi_split_definitions (
    split_id INTEGER PRIMARY KEY AUTOINCREMENT,
    split_name TEXT NOT NULL,
    split_strategy TEXT NOT NULL CHECK (split_strategy IN (
        'random', 'cold_protein', 'cold_both',
        'bfs_cluster', 'degree_balanced')),
    description TEXT,
    random_seed INTEGER,
    train_ratio REAL DEFAULT 0.7,
    val_ratio REAL DEFAULT 0.1,
    test_ratio REAL DEFAULT 0.2,
    date_created TEXT DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
    version TEXT DEFAULT '1.0',
    UNIQUE(split_name, version)
);

CREATE TABLE ppi_split_assignments (
    pair_id INTEGER NOT NULL REFERENCES protein_protein_pairs(pair_id),
    split_id INTEGER NOT NULL REFERENCES ppi_split_definitions(split_id),
    fold TEXT NOT NULL CHECK (fold IN ('train', 'val', 'test')),
    PRIMARY KEY (pair_id, split_id)
);

CREATE INDEX idx_ppi_splits_fold ON ppi_split_assignments(split_id, fold);

-- Record migration
INSERT INTO schema_migrations (version) VALUES ('001');
|
migrations_ppi/002_llm_annotations.sql
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- Migration 002: Add protein annotations and publication abstracts for LLM benchmark
-- Adds function_description, go_terms, domain_annotations to proteins table
-- Creates ppi_publication_abstracts table for PubMed abstract storage

-- Free-text functional summary, GO term list, and domain annotations
-- (stored as TEXT; serialization format is decided upstream) used when
-- building LLM prompts.
ALTER TABLE proteins ADD COLUMN function_description TEXT;
ALTER TABLE proteins ADD COLUMN go_terms TEXT;
ALTER TABLE proteins ADD COLUMN domain_annotations TEXT;

-- Cache of PubMed abstracts referenced by PPI evidence records.
CREATE TABLE IF NOT EXISTS ppi_publication_abstracts (
    pmid             INTEGER PRIMARY KEY,   -- PubMed identifier
    title            TEXT,
    abstract         TEXT NOT NULL,
    publication_year INTEGER,
    journal          TEXT,
    fetched_at       TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Record the migration LAST so that a failure in any ALTER/CREATE above does
-- not leave version '002' marked as applied without the schema changes
-- (also matches the ordering convention used in migration 001).
INSERT OR IGNORE INTO schema_migrations (version) VALUES ('002');
|
paper/appendix/app_checklist.tex
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
\input{checklist}
|
paper/appendix/app_contamination.tex
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{Full PPI Contamination Analysis}
|
| 2 |
+
\label{app:contamination}
|
| 3 |
+
|
| 4 |
+
This appendix presents the complete PPI L4 contamination analysis, including temporal stratification and protein popularity controls.
|
| 5 |
+
|
| 6 |
+
\subsection{Temporal Contamination}
|
| 7 |
+
|
| 8 |
+
Table~\ref{tab:ppi_contam_full} shows per-run accuracy for pre-2015 and post-2020 IntAct interaction pairs across all 20 PPI-L4 runs (5 models $\times$ 4 configs). All 20 runs exceed the 0.15 contamination threshold~\citep{balloccu2024leak}, with gaps ranging from 0.361 to 0.612.
|
| 9 |
+
|
| 10 |
+
\begin{table}[h]
|
| 11 |
+
\centering
|
| 12 |
+
\caption{PPI L4 temporal contamination: accuracy on pre-2015 vs.\ post-2020 pairs. Gap $>$ 0.15 indicates likely memorization. All runs flagged.}
|
| 13 |
+
\label{tab:ppi_contam_full}
|
| 14 |
+
\scriptsize
|
| 15 |
+
\begin{tabular}{@{}llcccc@{}}
|
| 16 |
+
\toprule
|
| 17 |
+
\textbf{Model} & \textbf{Config} & \textbf{Acc pre-2015} & \textbf{Acc post-2020} & \textbf{Gap} & \textbf{Flag} \\
|
| 18 |
+
\midrule
|
| 19 |
+
Claude Haiku-4.5 & 3-shot (set 0) & 0.598 & 0.041 & 0.557 & YES \\
|
| 20 |
+
Claude Haiku-4.5 & 3-shot (set 1) & 0.618 & 0.051 & 0.567 & YES \\
|
| 21 |
+
Claude Haiku-4.5 & 3-shot (set 2) & 0.569 & 0.031 & 0.538 & YES \\
|
| 22 |
+
Claude Haiku-4.5 & zero-shot & 0.422 & 0.020 & 0.401 & YES \\
|
| 23 |
+
\midrule
|
| 24 |
+
Gemini-2.5-Flash & 3-shot (set 0) & 0.765 & 0.184 & 0.581 & YES \\
|
| 25 |
+
Gemini-2.5-Flash & 3-shot (set 1) & 0.686 & 0.133 & 0.554 & YES \\
|
| 26 |
+
Gemini-2.5-Flash & 3-shot (set 2) & 0.706 & 0.184 & 0.522 & YES \\
|
| 27 |
+
Gemini-2.5-Flash & zero-shot & 0.588 & 0.133 & 0.456 & YES \\
|
| 28 |
+
\midrule
|
| 29 |
+
GPT-4o-mini & 3-shot (set 0) & 0.569 & 0.112 & 0.456 & YES \\
|
| 30 |
+
GPT-4o-mini & 3-shot (set 1) & 0.422 & 0.051 & 0.371 & YES \\
|
| 31 |
+
GPT-4o-mini & 3-shot (set 2) & 0.569 & 0.092 & 0.477 & YES \\
|
| 32 |
+
GPT-4o-mini & zero-shot & 0.762 & 0.245 & 0.517 & YES \\
|
| 33 |
+
\midrule
|
| 34 |
+
Llama-3.3-70B & 3-shot (set 0) & 0.422 & 0.041 & 0.381 & YES \\
|
| 35 |
+
Llama-3.3-70B & 3-shot (set 1) & 0.745 & 0.133 & 0.612 & YES \\
|
| 36 |
+
Llama-3.3-70B & 3-shot (set 2) & 0.402 & 0.041 & 0.361 & YES \\
|
| 37 |
+
Llama-3.3-70B & zero-shot & 0.794 & 0.204 & 0.590 & YES \\
|
| 38 |
+
\midrule
|
| 39 |
+
Qwen2.5-32B & 3-shot (set 0) & 0.588 & 0.112 & 0.476 & YES \\
|
| 40 |
+
Qwen2.5-32B & 3-shot (set 1) & 0.510 & 0.061 & 0.449 & YES \\
|
| 41 |
+
Qwen2.5-32B & 3-shot (set 2) & 0.529 & 0.082 & 0.448 & YES \\
|
| 42 |
+
Qwen2.5-32B & zero-shot & 0.598 & 0.071 & 0.527 & YES \\
|
| 43 |
+
\bottomrule
|
| 44 |
+
\end{tabular}
|
| 45 |
+
\end{table}
|
| 46 |
+
|
| 47 |
+
\subsection{Contamination vs.\ Protein Popularity}
|
| 48 |
+
|
| 49 |
+
A potential confound: pre-2015 proteins may be better-studied (higher degree in interaction networks), and models might simply know more about popular proteins regardless of memorization. To control for this, we stratify L4 accuracy by protein pair degree (median split at degree 172.2).
|
| 50 |
+
|
| 51 |
+
Table~\ref{tab:ppi_contam_popularity} shows that the temporal gap persists in \emph{both} high-degree and low-degree protein pairs for all five models. Model-averaged gaps:
|
| 52 |
+
\begin{itemize}[nosep,leftmargin=*]
|
| 53 |
+
\item \textbf{High-degree pairs}: gaps 0.44--0.62 (all $\gg$ 0.15)
|
| 54 |
+
\item \textbf{Low-degree pairs}: gaps 0.30--0.57 (all $\gg$ 0.15)
|
| 55 |
+
\end{itemize}
|
| 56 |
+
|
| 57 |
+
This confirms that the temporal signal reflects genuine memorization of interaction databases, not a popularity confound. Notably, Gemini-2.5-Flash shows a \emph{stronger} gap for low-degree pairs (0.53 vs.\ 0.50), suggesting that it has memorized even obscure protein interactions.
|
| 58 |
+
|
| 59 |
+
\begin{table}[h]
|
| 60 |
+
\centering
|
| 61 |
+
\caption{PPI L4 contamination stratified by protein popularity. Gap persists for both high-degree and low-degree pairs, confirming true memorization.}
|
| 62 |
+
\label{tab:ppi_contam_popularity}
|
| 63 |
+
\scriptsize
|
| 64 |
+
\begin{tabular}{@{}llcccccc@{}}
|
| 65 |
+
\toprule
|
| 66 |
+
\textbf{Model} & \textbf{Config} & \multicolumn{2}{c}{\textbf{Pre-2015 Acc}} & \multicolumn{2}{c}{\textbf{Post-2020 Acc}} & \multicolumn{2}{c}{\textbf{Gap}} \\
|
| 67 |
+
\cmidrule(lr){3-4} \cmidrule(lr){5-6} \cmidrule(lr){7-8}
|
| 68 |
+
& & High & Low & High & Low & High & Low \\
|
| 69 |
+
\midrule
|
| 70 |
+
Haiku-4.5 & 3-shot & 0.661 & 0.514 & 0.045 & 0.037 & 0.615 & 0.478 \\
|
| 71 |
+
Haiku-4.5 & zero-shot & 0.518 & 0.304 & 0.045 & 0.000 & 0.472 & 0.304 \\
|
| 72 |
+
Gemini & 3-shot & 0.768 & 0.660 & 0.250 & 0.099 & 0.518 & 0.561 \\
|
| 73 |
+
Gemini & zero-shot & 0.643 & 0.522 & 0.205 & 0.074 & 0.438 & 0.448 \\
|
| 74 |
+
GPT-4o-mini & 3-shot & 0.637 & 0.377 & 0.099 & 0.074 & 0.539 & 0.303 \\
|
| 75 |
+
GPT-4o-mini & zero-shot & 0.855 & 0.652 & 0.250 & 0.241 & 0.605 & 0.411 \\
|
| 76 |
+
Llama-3.3 & 3-shot & 0.643 & 0.377 & 0.129 & 0.025 & 0.514 & 0.352 \\
|
| 77 |
+
Llama-3.3 & zero-shot & 0.857 & 0.717 & 0.273 & 0.148 & 0.584 & 0.569 \\
|
| 78 |
+
Qwen2.5 & 3-shot & 0.655 & 0.406 & 0.129 & 0.050 & 0.526 & 0.357 \\
|
| 79 |
+
Qwen2.5 & zero-shot & 0.679 & 0.500 & 0.091 & 0.056 & 0.588 & 0.444 \\
|
| 80 |
+
\bottomrule
|
| 81 |
+
\end{tabular}
|
| 82 |
+
\end{table}
|
| 83 |
+
|
| 84 |
+
\subsection{Model-Averaged Summary}
|
| 85 |
+
|
| 86 |
+
\begin{table}[h]
|
| 87 |
+
\centering
|
| 88 |
+
\caption{Model-averaged contamination gaps by protein popularity.}
|
| 89 |
+
\label{tab:ppi_contam_summary}
|
| 90 |
+
\scriptsize
|
| 91 |
+
\begin{tabular}{@{}lccc@{}}
|
| 92 |
+
\toprule
|
| 93 |
+
\textbf{Model} & \textbf{Avg Gap (High)} & \textbf{Avg Gap (Low)} & \textbf{Verdict} \\
|
| 94 |
+
\midrule
|
| 95 |
+
Claude Haiku-4.5 & 0.580 & 0.434 & True contamination \\
|
| 96 |
+
Gemini-2.5-Flash & 0.498 & 0.532 & True contamination (stronger for obscure) \\
|
| 97 |
+
GPT-4o-mini & 0.555 & 0.330 & True contamination \\
|
| 98 |
+
Llama-3.3-70B & 0.532 & 0.406 & True contamination \\
|
| 99 |
+
Qwen2.5-32B & 0.541 & 0.378 & True contamination \\
|
| 100 |
+
\bottomrule
|
| 101 |
+
\end{tabular}
|
| 102 |
+
\end{table}
|
| 103 |
+
|
| 104 |
+
\textbf{Key finding:} All five models show contamination gaps exceeding the 0.15 threshold in both high-degree and low-degree strata. The contamination signal is robust to protein popularity and reflects genuine memorization of interaction databases rather than a confound with protein familiarity.
|
paper/appendix/app_croissant.tex
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{Croissant Metadata}
|
| 2 |
+
\label{app:croissant}
|
| 3 |
+
|
| 4 |
+
NegBioDB provides Croissant JSON-LD metadata~\citep{akhtar2024croissant} for machine-readable dataset discovery, as required by the NeurIPS 2026 Evaluations \& Datasets Track. The metadata file (\texttt{croissant.json}) is distributed alongside the dataset on HuggingFace and describes all three domain databases and their ML export files.
|
| 5 |
+
|
| 6 |
+
The Croissant metadata includes:
|
| 7 |
+
\begin{itemize}[nosep,leftmargin=*]
|
| 8 |
+
\item \textbf{Dataset-level fields:} Name, description, license (CC BY-SA 4.0), URL, creator, date published, citation.
|
| 9 |
+
\item \textbf{Distribution:} Three SQLite databases (\texttt{negbiodb.db}, \texttt{negbiodb\_ct.db}, \texttt{negbiodb\_ppi.db}) and three Parquet ML export files.
|
| 10 |
+
\item \textbf{Record sets:} Schema definitions for each domain's primary fact table (\texttt{negative\_results}, \texttt{trial\_failure\_results}, \texttt{ppi\_negative\_results}) including field names, types, and descriptions.
|
| 11 |
+
\item \textbf{ML-specific fields:} Split definitions, positive/negative label semantics, and recommended evaluation metrics per domain.
|
| 12 |
+
\end{itemize}
|
| 13 |
+
|
| 14 |
+
The metadata validates successfully with the \texttt{mlcroissant} Python library (v1.0+). A minimal excerpt:
|
| 15 |
+
|
| 16 |
+
\begin{small}
|
| 17 |
+
\begin{verbatim}
|
| 18 |
+
{
|
| 19 |
+
"@context": {"@vocab": "https://schema.org/",
|
| 20 |
+
"cr": "http://mlcommons.org/croissant/"},
|
| 21 |
+
"@type": "cr:Dataset",
|
| 22 |
+
"name": "NegBioDB",
|
| 23 |
+
"description": "Multi-domain database of experimentally confirmed
|
| 24 |
+
negative results in biomedicine",
|
| 25 |
+
"license": "https://creativecommons.org/licenses/by-sa/4.0/",
|
| 26 |
+
"distribution": [
|
| 27 |
+
{"@type": "cr:FileObject",
|
| 28 |
+
"name": "negbiodb.db",
|
| 29 |
+
"contentUrl": "https://huggingface.co/.../negbiodb.db",
|
| 30 |
+
"encodingFormat": "application/x-sqlite3"},
|
| 31 |
+
{"@type": "cr:FileObject",
|
| 32 |
+
"name": "negbiodb_ct.db", ...},
|
| 33 |
+
{"@type": "cr:FileObject",
|
| 34 |
+
"name": "negbiodb_ppi.db", ...}
|
| 35 |
+
],
|
| 36 |
+
"recordSet": [
|
| 37 |
+
{"@type": "cr:RecordSet",
|
| 38 |
+
"name": "dti_negative_results",
|
| 39 |
+
"field": [
|
| 40 |
+
{"name": "compound_id", "@type": "cr:Field",
|
| 41 |
+
"dataType": "sc:Integer"},
|
| 42 |
+
{"name": "target_id", ...},
|
| 43 |
+
{"name": "confidence_tier", "dataType": "sc:Text"},
|
| 44 |
+
{"name": "activity_value", "dataType": "sc:Float"},
|
| 45 |
+
...
|
| 46 |
+
]}
|
| 47 |
+
]
|
| 48 |
+
}
|
| 49 |
+
\end{verbatim}
|
| 50 |
+
\end{small}
|
| 51 |
+
|
| 52 |
+
The full Croissant JSON-LD file is available in the dataset repository.
|
paper/appendix/app_datasheet.tex
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{Datasheet for Datasets}
|
| 2 |
+
\label{app:datasheet}
|
| 3 |
+
|
| 4 |
+
Following the framework of~\citet{gebru2021datasheets}, we provide a datasheet for NegBioDB.
|
| 5 |
+
|
| 6 |
+
\subsection{Motivation}
|
| 7 |
+
|
| 8 |
+
\textbf{For what purpose was the dataset created?}
|
| 9 |
+
NegBioDB was created to address the absence of curated negative results in biomedical AI benchmarks. Existing benchmarks treat untested entity pairs as negatives, an assumption that inflates model performance and prevents evaluation of genuine negative result understanding.
|
| 10 |
+
|
| 11 |
+
\textbf{Who created the dataset and on behalf of which entity?}
|
| 12 |
+
The dataset was created by a single researcher at Weill Cornell Medicine as part of a doctoral research project.
|
| 13 |
+
|
| 14 |
+
\textbf{Who funded the creation of the dataset?}
|
| 15 |
+
No external funding was received for this project.
|
| 16 |
+
|
| 17 |
+
\subsection{Composition}
|
| 18 |
+
|
| 19 |
+
\textbf{What do the instances that comprise the dataset represent?}
|
| 20 |
+
Each instance represents an experimentally confirmed negative result: a biological hypothesis that was tested and found to be unsupported. Specifically:
|
| 21 |
+
\begin{itemize}[nosep,leftmargin=*]
|
| 22 |
+
\item \textbf{DTI}: A compound--target pair tested for binding activity and found inactive (e.g., IC$_{50}$ $>$ 10$\mu$M).
|
| 23 |
+
\item \textbf{CT}: A drug--condition pair tested in a clinical trial that failed to meet its primary endpoint.
|
| 24 |
+
\item \textbf{PPI}: A protein--protein pair tested for physical interaction and found non-interacting.
|
| 25 |
+
\end{itemize}
|
| 26 |
+
|
| 27 |
+
\textbf{How many instances are there in total?}
|
| 28 |
+
32.9 million negative results: 30.5M (DTI), 132,925 (CT), and 2.23M (PPI).
|
| 29 |
+
|
| 30 |
+
\textbf{Does the dataset contain all possible instances or is it a sample?}
|
| 31 |
+
It is a curated sample from 12 source databases. DTI includes all ChEMBL pchembl$<$5 records, PubChem confirmatory inactives, BindingDB Kd$>$10$\mu$M, and DAVIS matrix inactives. CT includes all AACT trials classified as clinical failures plus CTO copper-tier records. PPI includes IntAct curated non-interactions, HuRI screen negatives (sampled from 39.9M), hu.MAP ML-derived negatives, and STRING zero-score pairs (sampled from $>$100M).
|
| 32 |
+
|
| 33 |
+
\textbf{What data does each instance consist of?}
|
| 34 |
+
Entity identifiers (ChEMBL IDs, UniProt accessions, NCT IDs), experimental metadata (assay type, detection method, p-values), outcome measures (activity values, effect sizes), confidence tier assignment, and provenance information (source database, extraction method, publication year).
|
| 35 |
+
|
| 36 |
+
\textbf{Is there a label or target associated with each instance?}
|
| 37 |
+
Yes. All instances are labeled as negative results with a four-tier confidence classification: gold (systematic screens, multiple confirmations), silver (ML-derived or p-value based), bronze (computational or NLP-detected), copper (label-only, minimal evidence).
|
| 38 |
+
|
| 39 |
+
\textbf{Is any information missing from individual instances?}
|
| 40 |
+
Activity values are missing for some DTI records (especially PubChem inactives that report only active/inactive). Clinical trial p-values are available only for gold/silver tier CT records. PPI records from STRING lack experimental metadata.
|
| 41 |
+
|
| 42 |
+
\textbf{Are there any errors, sources of noise, or redundancies?}
|
| 43 |
+
Potential errors include: NLP misclassification of CT failure categories (bronze tier), false negatives in HuRI Y2H screens (estimated 20--40\% false negative rate for Y2H), and activity value discrepancies across sources for DTI. Deduplication indexes prevent exact duplicates but near-duplicate records from different sources are preserved as multi-source evidence.
|
| 44 |
+
|
| 45 |
+
\textbf{Is the dataset self-contained?}
|
| 46 |
+
Yes. The SQLite databases contain all necessary data. Chemical structures (SMILES), protein sequences, and trial metadata are stored directly. External identifiers enable linkage to source databases for additional context.
|
| 47 |
+
|
| 48 |
+
\subsection{Collection Process}
|
| 49 |
+
|
| 50 |
+
\textbf{How was the data associated with each instance acquired?}
|
| 51 |
+
All data was acquired from public databases via their official APIs or bulk download services:
|
| 52 |
+
\begin{itemize}[nosep,leftmargin=*]
|
| 53 |
+
\item ChEMBL v34 (SQL dump), PubChem (BioAssay FTP), BindingDB (servlet download), DAVIS (literature supplement)
|
| 54 |
+
\item AACT (monthly PostgreSQL dump), CTO (GitHub release), Open Targets (API), Shi \& Du 2024 (supplement)
|
| 55 |
+
\item IntAct (PSI-MI TAB), HuRI (interactome-atlas.org), hu.MAP 3.0 (bulk download), STRING v12.0 (API)
|
| 56 |
+
\end{itemize}
|
| 57 |
+
|
| 58 |
+
\textbf{What mechanisms or procedures were used to collect the data?}
|
| 59 |
+
Automated ETL pipelines with validation checks. Drug names in CT were resolved to ChEMBL identifiers via a 4-step cascade: exact ChEMBL match $\to$ PubChem API $\to$ fuzzy matching (Jaro-Winkler $>$ 0.90) $\to$ manual CSV. CT failure categories were assigned via 3-tier detection: NLP keyword matching $\to$ p-value extraction $\to$ CTO labels.
|
| 60 |
+
|
| 61 |
+
\textbf{Who was involved in the data collection process?}
|
| 62 |
+
A single researcher performed all data collection, processing, and validation. 800+ automated tests verify pipeline correctness.
|
| 63 |
+
|
| 64 |
+
\textbf{Over what timeframe was the data collected?}
|
| 65 |
+
Data collection occurred January--March 2026. Source databases span different time periods: ChEMBL v34 (through 2024), AACT (through February 2026), IntAct (through 2024), HuRI (2020 publication).
|
| 66 |
+
|
| 67 |
+
\subsection{Preprocessing, Cleaning, Labeling}
|
| 68 |
+
|
| 69 |
+
\textbf{Was any preprocessing/cleaning/labeling of the data done?}
|
| 70 |
+
\begin{itemize}[nosep,leftmargin=*]
|
| 71 |
+
\item \textbf{DTI}: SMILES canonicalization via RDKit, InChIKey generation, pchembl value computation, activity value unit normalization. Confidence tier assignment based on source and assay type.
|
| 72 |
+
\item \textbf{CT}: Three-tier failure classification (NLP/p-value/CTO), drug name resolution to ChEMBL, failure category assignment (8 categories). Tier upgrades applied (bronze+p-value$\to$silver).
|
| 73 |
+
\item \textbf{PPI}: UniProt accession validation, ENSG version suffix stripping, canonical pair ordering (protein1\_id $<$ protein2\_id), reservoir sampling for HuRI and STRING.
|
| 74 |
+
\end{itemize}
|
| 75 |
+
|
| 76 |
+
\textbf{Was the ``raw'' data saved in addition to the preprocessed/cleaned/labeled data?}
|
| 77 |
+
Source database downloads are archived but not distributed due to size (AACT: 2.23 GB, ChEMBL: several GB). The processed SQLite databases and Parquet exports are the distributed artifacts.
|
| 78 |
+
|
| 79 |
+
\subsection{Uses}
|
| 80 |
+
|
| 81 |
+
\textbf{Has the dataset been used for any tasks already?}
|
| 82 |
+
Yes, for the NegBioBench benchmark described in this paper: 180 ML experiments and 241 LLM experiments across three domains.
|
| 83 |
+
|
| 84 |
+
\textbf{What (other) tasks could the dataset be used for?}
|
| 85 |
+
Drug repurposing (negative results constrain hypothesis space), clinical trial design (learning from prior failures), protein interaction network refinement, negative-aware training for DTI/PPI prediction models, and LLM evaluation for scientific reasoning.
|
| 86 |
+
|
| 87 |
+
\textbf{Is there anything about the composition of the dataset or the way it was collected that might impact future uses?}
|
| 88 |
+
The CC BY-SA 4.0 license (required by ChEMBL's CC BY-SA 3.0 viral clause) means derivative works must maintain the same license. DTI bronze tier (94.6\% of records) has lower confidence than gold/silver tiers. CT drug resolution achieved only 20.6\% ChEMBL coverage, limiting chemical feature availability.
|
| 89 |
+
|
| 90 |
+
\subsection{Distribution}
|
| 91 |
+
|
| 92 |
+
\textbf{How will the dataset be distributed?}
|
| 93 |
+
Via HuggingFace Datasets Hub (primary), GitHub repository (code + small exports), and Zenodo (archival DOI).
|
| 94 |
+
|
| 95 |
+
\textbf{When will the dataset be released?}
|
| 96 |
+
Upon paper acceptance or preprint posting.
|
| 97 |
+
|
| 98 |
+
\textbf{Will the dataset be distributed under a copyright or IP license?}
|
| 99 |
+
CC BY-SA 4.0 International.
|
| 100 |
+
|
| 101 |
+
\textbf{Have any third parties imposed IP-based or other restrictions?}
|
| 102 |
+
ChEMBL's CC BY-SA 3.0 license requires share-alike, which propagates to the full dataset. All other sources use permissive licenses (public domain, Apache 2.0, MIT, CC BY 4.0).
|
| 103 |
+
|
| 104 |
+
\subsection{Maintenance}
|
| 105 |
+
|
| 106 |
+
\textbf{Who will be supporting/hosting/maintaining the dataset?}
|
| 107 |
+
The first author, with institutional support from Weill Cornell Medicine. HuggingFace provides persistent hosting.
|
| 108 |
+
|
| 109 |
+
\textbf{How can the owner/curator/manager of the dataset be contacted?}
|
| 110 |
+
Via the GitHub repository issue tracker or the corresponding author email listed in the paper.
|
| 111 |
+
|
| 112 |
+
\textbf{Will the dataset be updated?}
|
| 113 |
+
We plan annual updates incorporating new ChEMBL releases, AACT snapshots, and additional PPI databases. Version tags and checksums enable reproducible access to specific releases.
|
| 114 |
+
|
| 115 |
+
\textbf{Will older versions of the dataset continue to be available?}
|
| 116 |
+
Yes, via Zenodo DOI versioning and HuggingFace dataset revisions.
|
| 117 |
+
|
| 118 |
+
\textbf{If others want to contribute to the dataset, is there a mechanism?}
|
| 119 |
+
The schema includes \texttt{community\_submitted} extraction method and \texttt{curator\_validated} flags. A contribution platform is planned for future work.
|
paper/appendix/app_l3_analysis.tex
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{L3 Reasoning Analysis}
|
| 2 |
+
\label{app:l3_analysis}
|
| 3 |
+
|
| 4 |
+
L3 evaluates open-ended scientific reasoning about negative results using an LLM judge that scores responses on four dimensions (1--5 scale). Each domain uses a domain-appropriate judge: GPT-4o-mini for CT, Gemini-2.5-Flash for PPI, and Gemini-2.5-Flash-Lite for DTI. We present the full per-dimension breakdown and discuss the ceiling effect that motivated deferring L3 from the main text.
|
| 5 |
+
|
| 6 |
+
\subsection{Judge Ceiling Effect}
|
| 7 |
+
|
| 8 |
+
A ceiling effect is present in two domains:
|
| 9 |
+
\begin{itemize}[nosep,leftmargin=*]
|
| 10 |
+
\item \textbf{CT} (GPT-4o-mini judge): Overall scores range 4.45--5.00 (Table~\ref{tab:ct_l3_full}). Three of five models (Haiku, Gemini, Qwen) receive perfect 5.0 scores in zero-shot mode; GPT-4o-mini scores 4.66 and Llama-3.3-70B scores 4.997. This ceiling confounds meaningful cross-model comparison.
|
| 11 |
+
\item \textbf{PPI} (Gemini-2.5-Flash judge): Zero-shot scores range 4.28--4.68 (Table~\ref{tab:ppi_l3_full}). While lower than CT, the range is still compressed. Notably, Llama-3.3-70B 3-shot has a 51.5\% formatting error rate; successful completions score 2.05$\pm$0.92, but overall 3-shot performance is unreliable.
|
| 12 |
+
\end{itemize}
|
| 13 |
+
|
| 14 |
+
DTI L3 shows the most meaningful variation (3.02--4.66), with Gemini-2.5-Flash achieving the highest overall score.
|
| 15 |
+
|
| 16 |
+
\subsection{PPI L3 Per-Dimension Scores}
|
| 17 |
+
|
| 18 |
+
Table~\ref{tab:ppi_l3_dims} reveals that \textbf{structural reasoning} (dimension 2) is the most challenging aspect across all models. Even top-performing models score 1.2--4.4 on structural reasoning while achieving 4.6--5.0 on biological plausibility. This suggests models excel at identifying functional mismatches between proteins but struggle with detailed structural arguments about binding interfaces, domain compatibility, and steric constraints.
|
| 19 |
+
|
| 20 |
+
\begin{table}[h]
|
| 21 |
+
\centering
|
| 22 |
+
\caption{PPI L3 per-dimension judge scores. Structural reasoning is consistently the weakest dimension. $\dagger$Llama-3.3-70B 3-shot has 51.5\% output failure rate; scores computed from successful completions only.}
|
| 23 |
+
\label{tab:ppi_l3_dims}
|
| 24 |
+
\scriptsize
|
| 25 |
+
\begin{tabular}{@{}llccccc@{}}
|
| 26 |
+
\toprule
|
| 27 |
+
\textbf{Model} & \textbf{Config} & \textbf{Bio.\ Plaus.} & \textbf{Struct.\ Reas.} & \textbf{Mech.\ Compl.} & \textbf{Specificity} & \textbf{Overall} \\
|
| 28 |
+
\midrule
|
| 29 |
+
Claude Haiku-4.5 & 3-shot & 4.98 & 2.25 & 3.17 & 4.39 & 3.70 \\
|
| 30 |
+
Claude Haiku-4.5 & zero-shot & 5.00 & 4.29 & 4.46 & 4.99 & 4.68 \\
|
| 31 |
+
Gemini-2.5-Flash & 3-shot & 4.71 & 1.24 & 2.70 & 3.77 & 3.11 \\
|
| 32 |
+
Gemini-2.5-Flash & zero-shot & 5.00 & 4.39 & 4.19 & 5.00 & 4.65 \\
|
| 33 |
+
GPT-4o-mini & 3-shot & 4.80 & 1.34 & 2.78 & 3.92 & 3.21 \\
|
| 34 |
+
GPT-4o-mini & zero-shot & 4.87 & 3.98 & 3.87 & 4.73 & 4.36 \\
|
| 35 |
+
Llama-3.3-70B & 3-shot & 2.81 & 1.18 & 1.80 & 2.43 & 2.05$^\dagger$ \\
|
| 36 |
+
Llama-3.3-70B & zero-shot & 4.77 & 3.94 & 3.70 & 4.71 & 4.28 \\
|
| 37 |
+
Qwen2.5-32B & 3-shot & 4.92 & 1.96 & 3.06 & 4.10 & 3.51 \\
|
| 38 |
+
Qwen2.5-32B & zero-shot & 4.91 & 4.01 & 4.02 & 4.87 & 4.45 \\
|
| 39 |
+
\bottomrule
|
| 40 |
+
\end{tabular}
|
| 41 |
+
\end{table}
|
| 42 |
+
|
| 43 |
+
\subsection{3-shot Degradation in PPI L3}
|
| 44 |
+
|
| 45 |
+
A notable phenomenon: 3-shot prompting \emph{degrades} PPI L3 scores compared to zero-shot for all models (e.g., Gemini: 4.65$\to$3.11, GPT-4o-mini: 4.36$\to$3.21). This contrasts with L1 where 3-shot dramatically improves performance. We hypothesize that few-shot examples constrain the model's reasoning to follow a specific template, reducing the depth and specificity of explanations compared to unconstrained zero-shot generation. This effect is most extreme for Llama-3.3-70B, where 3-shot examples cause a 51.5\% output failure rate---successful completions score only 2.05$\pm$0.92.
|
| 46 |
+
|
| 47 |
+
\subsection{Cross-Domain L3 Comparison}
|
| 48 |
+
|
| 49 |
+
Despite the ceiling effect, two patterns emerge:
|
| 50 |
+
\begin{enumerate}[nosep,leftmargin=*]
|
| 51 |
+
\item \textbf{Gemini-2.5-Flash consistently leads} across DTI (4.62 3-shot) and PPI (4.65 zero-shot), producing the most scientifically specific explanations.
|
| 52 |
+
\item \textbf{Specificity is the hardest dimension in DTI} (2.41--4.22) but not in PPI (3.77--5.00), suggesting that DTI requires more specialized pharmacological knowledge that models lack.
|
| 53 |
+
\end{enumerate}
|
paper/appendix/app_llm_tables.tex
ADDED
|
@@ -0,0 +1,279 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{Complete LLM Results}
|
| 2 |
+
\label{app:llm_tables}
|
| 3 |
+
|
| 4 |
+
This appendix presents complete LLM results for all 241 experiments. DTI uses 6 models (81 runs); CT and PPI each use 5 models (80 runs each). Each model is evaluated in zero-shot and three 3-shot configurations (different random example sets). 3-shot results report mean $\pm$ std across the three example sets.
|
| 5 |
+
|
| 6 |
+
\subsection{DTI LLM Results (81 runs)}
|
| 7 |
+
|
| 8 |
+
DTI models: Gemini-2.5-Flash, Gemini-2.5-Flash-Lite, GPT-4o-mini, Llama-3.3-70B, Mistral-7B, Qwen2.5-32B. L2 was not evaluated for DTI due to annotation cost.
|
| 9 |
+
|
| 10 |
+
\begin{table}[h]
|
| 11 |
+
\centering
|
| 12 |
+
\caption{DTI L1 (MCQ, 4-way classification). N=3 for 3-shot (mean$\pm$std), N=1 for zero-shot.}
|
| 13 |
+
\label{tab:dti_l1_full}
|
| 14 |
+
\scriptsize
|
| 15 |
+
\begin{tabular}{@{}llccc@{}}
|
| 16 |
+
\toprule
|
| 17 |
+
\textbf{Model} & \textbf{Config} & \textbf{Accuracy} & \textbf{Macro-F1} & \textbf{MCC} \\
|
| 18 |
+
\midrule
|
| 19 |
+
Gemini-2.5-Flash & 3-shot & 1.000$\pm$0.000 & 1.000$\pm$0.000 & 1.000$\pm$0.000 \\
|
| 20 |
+
Gemini-2.5-Flash & zero-shot & 0.721 & 0.630 & 0.610 \\
|
| 21 |
+
Gemini-2.5-Flash-Lite & 3-shot & 0.971$\pm$0.019 & 0.964$\pm$0.031 & 0.960$\pm$0.026 \\
|
| 22 |
+
Gemini-2.5-Flash-Lite & zero-shot & 0.807 & 0.705 & 0.750 \\
|
| 23 |
+
GPT-4o-mini & 3-shot & 0.944$\pm$0.012 & 0.941$\pm$0.014 & 0.922$\pm$0.016 \\
|
| 24 |
+
GPT-4o-mini & zero-shot & 0.736 & 0.642 & 0.632 \\
|
| 25 |
+
Llama-3.3-70B & 3-shot & 0.991$\pm$0.010 & 0.991$\pm$0.010 & 0.987$\pm$0.014 \\
|
| 26 |
+
Llama-3.3-70B & zero-shot & 0.613 & 0.501 & 0.430 \\
|
| 27 |
+
Mistral-7B & 3-shot & 0.650$\pm$0.060 & 0.496$\pm$0.108 & 0.502$\pm$0.086 \\
|
| 28 |
+
Mistral-7B & zero-shot & 0.708 & 0.551 & 0.593 \\
|
| 29 |
+
Qwen2.5-32B & 3-shot & 0.977$\pm$0.012 & 0.978$\pm$0.012 & 0.968$\pm$0.017 \\
|
| 30 |
+
Qwen2.5-32B & zero-shot & 0.728 & 0.635 & 0.620 \\
|
| 31 |
+
\bottomrule
|
| 32 |
+
\end{tabular}
|
| 33 |
+
\end{table}
|
| 34 |
+
|
| 35 |
+
\begin{table}[h]
|
| 36 |
+
\centering
|
| 37 |
+
\caption{DTI L3 (reasoning, LLM-as-judge). Judge dimensions: accuracy, completeness, reasoning quality, specificity. Overall = mean of 4 dimensions.}
|
| 38 |
+
\label{tab:dti_l3_full}
|
| 39 |
+
\scriptsize
|
| 40 |
+
\begin{tabular}{@{}llccccc@{}}
|
| 41 |
+
\toprule
|
| 42 |
+
\textbf{Model} & \textbf{Config} & \textbf{Accuracy} & \textbf{Complete.} & \textbf{Reasoning} & \textbf{Specific.} & \textbf{Overall} \\
|
| 43 |
+
\midrule
|
| 44 |
+
Gemini-2.5-Flash & 3-shot & 4.88$\pm$0.01 & 4.49$\pm$0.08 & 4.89$\pm$0.02 & 4.22$\pm$0.01 & 4.62$\pm$0.02 \\
|
| 45 |
+
Gemini-2.5-Flash & zero-shot & 4.85 & 4.70 & 4.80 & 4.30 & 4.66 \\
|
| 46 |
+
Gemini-Lite & 3-shot & 4.31$\pm$0.03 & 4.06$\pm$0.03 & 4.28$\pm$0.03 & 3.23$\pm$0.05 & 3.97$\pm$0.02 \\
|
| 47 |
+
Gemini-Lite & zero-shot & 4.20 & 4.05 & 4.10 & 3.08 & 3.86 \\
|
| 48 |
+
GPT-4o-mini & 3-shot & 4.06$\pm$0.01 & 4.04$\pm$0.02 & 4.02$\pm$0.02 & 2.59$\pm$0.04 & 3.68$\pm$0.02 \\
|
| 49 |
+
GPT-4o-mini & zero-shot & 3.97 & 3.97 & 3.97 & 2.85 & 3.69 \\
|
| 50 |
+
Llama-3.3-70B & 3-shot & 4.18$\pm$0.06 & 4.10$\pm$0.04 & 4.03$\pm$0.04 & 2.41$\pm$0.07 & 3.68$\pm$0.05 \\
|
| 51 |
+
Llama-3.3-70B & zero-shot & 3.94 & 3.86 & 3.72 & 2.36 & 3.47 \\
|
| 52 |
+
Mistral-7B & 3-shot & 3.46$\pm$0.21 & 3.63$\pm$0.15 & 3.23$\pm$0.05 & 1.77$\pm$0.10 & 3.02$\pm$0.08 \\
|
| 53 |
+
Mistral-7B & zero-shot & 3.55 & 3.66 & 3.48 & 2.24 & 3.23 \\
|
| 54 |
+
Qwen2.5-32B & 3-shot & 4.04$\pm$0.08 & 4.02$\pm$0.06 & 3.90$\pm$0.07 & 2.66$\pm$0.01 & 3.65$\pm$0.05 \\
|
| 55 |
+
Qwen2.5-32B & zero-shot & 3.97 & 3.97 & 3.87 & 2.82 & 3.66 \\
|
| 56 |
+
\bottomrule
|
| 57 |
+
\end{tabular}
|
| 58 |
+
\end{table}
|
| 59 |
+
|
| 60 |
+
\begin{table}[h]
|
| 61 |
+
\centering
|
| 62 |
+
\caption{DTI L4 (discrimination, tested vs.\ untested). Evidence citation rate measures hallucination.}
|
| 63 |
+
\label{tab:dti_l4_full}
|
| 64 |
+
\scriptsize
|
| 65 |
+
\begin{tabular}{@{}llcccc@{}}
|
| 66 |
+
\toprule
|
| 67 |
+
\textbf{Model} & \textbf{Config} & \textbf{Accuracy} & \textbf{MCC} & \textbf{Contam.\ Gap} & \textbf{Halluc.\ Rate} \\
|
| 68 |
+
\midrule
|
| 69 |
+
Gemini-2.5-Flash & 3-shot & 0.478$\pm$0.006 & $-$0.102$\pm$0.005 & 0.033$\pm$0.036 & 1.000 \\
|
| 70 |
+
Gemini-2.5-Flash & zero-shot & 0.427 & $-$0.234 & 0.047 & 0.994 \\
|
| 71 |
+
Gemini-Lite & 3-shot & 0.570$\pm$0.040 & 0.181$\pm$0.087 & 0.022$\pm$0.027 & 0.990$\pm$0.008 \\
|
| 72 |
+
Gemini-Lite & zero-shot & 0.350 & $-$0.349 & 0.146 & 0.727 \\
|
| 73 |
+
GPT-4o-mini & 3-shot & 0.512$\pm$0.011 & 0.037$\pm$0.036 & 0.249$\pm$0.011 & 0.991$\pm$0.012 \\
|
| 74 |
+
GPT-4o-mini & zero-shot & 0.517 & 0.047 & 0.233 & 1.000 \\
|
| 75 |
+
Llama-3.3-70B & 3-shot & 0.589$\pm$0.026 & 0.184$\pm$0.051 & 0.242$\pm$0.014 & 1.000 \\
|
| 76 |
+
Llama-3.3-70B & zero-shot & 0.540 & 0.101 & $-$0.024 & 1.000 \\
|
| 77 |
+
Mistral-7B & 3-shot & 0.491$\pm$0.013 & $-$0.030$\pm$0.042 & 0.049$\pm$0.047 & 1.000 \\
|
| 78 |
+
Mistral-7B & zero-shot & 0.500 & 0.000 & 0.000 & 1.000 \\
|
| 79 |
+
Qwen2.5-32B & 3-shot & 0.538$\pm$0.013 & 0.113$\pm$0.038 & 0.163$\pm$0.021 & 1.000 \\
|
| 80 |
+
Qwen2.5-32B & zero-shot & 0.510 & 0.046 & 0.098 & 1.000 \\
|
| 81 |
+
\bottomrule
|
| 82 |
+
\end{tabular}
|
| 83 |
+
\end{table}
|
| 84 |
+
|
| 85 |
+
\clearpage
|
| 86 |
+
|
| 87 |
+
\subsection{CT LLM Results (80 runs)}
|
| 88 |
+
|
| 89 |
+
CT models: Claude Haiku-4.5, Gemini-2.5-Flash, GPT-4o-mini, Llama-3.3-70B, Qwen2.5-32B.
|
| 90 |
+
|
| 91 |
+
\begin{table}[h]
|
| 92 |
+
\centering
|
| 93 |
+
\caption{CT-L1 (MCQ, 5-way failure classification).}
|
| 94 |
+
\label{tab:ct_l1_full}
|
| 95 |
+
\scriptsize
|
| 96 |
+
\begin{tabular}{@{}llccc@{}}
|
| 97 |
+
\toprule
|
| 98 |
+
\textbf{Model} & \textbf{Config} & \textbf{Accuracy} & \textbf{Macro-F1} & \textbf{MCC} \\
|
| 99 |
+
\midrule
|
| 100 |
+
Claude Haiku-4.5 & 3-shot & 0.662$\pm$0.012 & 0.657$\pm$0.015 & 0.592$\pm$0.014 \\
|
| 101 |
+
Claude Haiku-4.5 & zero-shot & 0.660 & 0.652 & 0.581 \\
|
| 102 |
+
Gemini-2.5-Flash & 3-shot & 0.667$\pm$0.014 & 0.663$\pm$0.017 & 0.597$\pm$0.015 \\
|
| 103 |
+
Gemini-2.5-Flash & zero-shot & 0.681 & 0.675 & 0.609 \\
|
| 104 |
+
GPT-4o-mini & 3-shot & 0.625$\pm$0.011 & 0.616$\pm$0.012 & 0.546$\pm$0.012 \\
|
| 105 |
+
GPT-4o-mini & zero-shot & 0.641 & 0.634 & 0.571 \\
|
| 106 |
+
Llama-3.3-70B & 3-shot & 0.634$\pm$0.022 & 0.630$\pm$0.030 & 0.560$\pm$0.026 \\
|
| 107 |
+
Llama-3.3-70B & zero-shot & 0.631 & 0.617 & 0.559 \\
|
| 108 |
+
Qwen2.5-32B & 3-shot & 0.648$\pm$0.017 & 0.642$\pm$0.022 & 0.572$\pm$0.024 \\
|
| 109 |
+
Qwen2.5-32B & zero-shot & 0.654 & 0.641 & 0.579 \\
|
| 110 |
+
\bottomrule
|
| 111 |
+
\end{tabular}
|
| 112 |
+
\end{table}
|
| 113 |
+
|
| 114 |
+
\begin{table}[h]
|
| 115 |
+
\centering
|
| 116 |
+
\caption{CT-L2 (structured extraction from clinical trial evidence).}
|
| 117 |
+
\label{tab:ct_l2_full}
|
| 118 |
+
\scriptsize
|
| 119 |
+
\begin{tabular}{@{}llccc@{}}
|
| 120 |
+
\toprule
|
| 121 |
+
\textbf{Model} & \textbf{Config} & \textbf{Category Acc} & \textbf{Field F1} & \textbf{Schema Compl.} \\
|
| 122 |
+
\midrule
|
| 123 |
+
Claude Haiku-4.5 & 3-shot & 0.738$\pm$0.055 & 0.476$\pm$0.099 & 1.000 \\
|
| 124 |
+
Claude Haiku-4.5 & zero-shot & 0.725 & 0.280 & 1.000 \\
|
| 125 |
+
Gemini-2.5-Flash & 3-shot & 0.742$\pm$0.068 & 0.746$\pm$0.162 & 1.000 \\
|
| 126 |
+
Gemini-2.5-Flash & zero-shot & 0.760 & 0.284 & 1.000 \\
|
| 127 |
+
GPT-4o-mini & 3-shot & 0.715$\pm$0.089 & 0.734$\pm$0.185 & 0.917$\pm$0.001 \\
|
| 128 |
+
GPT-4o-mini & zero-shot & 0.751 & 0.334 & 0.965 \\
|
| 129 |
+
Llama-3.3-70B & 3-shot & 0.752$\pm$0.064 & 0.768$\pm$0.161 & 1.000 \\
|
| 130 |
+
Llama-3.3-70B & zero-shot & 0.762 & 0.315 & 1.000 \\
|
| 131 |
+
Qwen2.5-32B & 3-shot & 0.709$\pm$0.095 & 0.808$\pm$0.162 & 1.000 \\
|
| 132 |
+
Qwen2.5-32B & zero-shot & 0.718 & 0.315 & 1.000 \\
|
| 133 |
+
\bottomrule
|
| 134 |
+
\end{tabular}
|
| 135 |
+
\end{table}
|
| 136 |
+
|
| 137 |
+
\begin{table}[h]
|
| 138 |
+
\centering
|
| 139 |
+
\caption{CT-L3 (reasoning, LLM-as-judge overall score, 1--5 scale).}
|
| 140 |
+
\label{tab:ct_l3_full}
|
| 141 |
+
\scriptsize
|
| 142 |
+
\begin{tabular}{@{}llc@{}}
|
| 143 |
+
\toprule
|
| 144 |
+
\textbf{Model} & \textbf{Config} & \textbf{Overall Score} \\
|
| 145 |
+
\midrule
|
| 146 |
+
Claude Haiku-4.5 & 3-shot & 4.960$\pm$0.007 \\
|
| 147 |
+
Claude Haiku-4.5 & zero-shot & 5.000 \\
|
| 148 |
+
Gemini-2.5-Flash & 3-shot & 4.453$\pm$0.044 \\
|
| 149 |
+
Gemini-2.5-Flash & zero-shot & 5.000 \\
|
| 150 |
+
GPT-4o-mini & 3-shot & 4.743$\pm$0.058 \\
|
| 151 |
+
GPT-4o-mini & zero-shot & 4.661 \\
|
| 152 |
+
Llama-3.3-70B & 3-shot & 4.826$\pm$0.007 \\
|
| 153 |
+
Llama-3.3-70B & zero-shot & 4.997 \\
|
| 154 |
+
Qwen2.5-32B & 3-shot & 4.968$\pm$0.007 \\
|
| 155 |
+
Qwen2.5-32B & zero-shot & 5.000 \\
|
| 156 |
+
\bottomrule
|
| 157 |
+
\end{tabular}
|
| 158 |
+
\end{table}
|
| 159 |
+
|
| 160 |
+
\begin{table}[h]
|
| 161 |
+
\centering
|
| 162 |
+
\caption{CT-L4 (discrimination, tested vs.\ untested clinical trials).}
|
| 163 |
+
\label{tab:ct_l4_full}
|
| 164 |
+
\scriptsize
|
| 165 |
+
\begin{tabular}{@{}llccc@{}}
|
| 166 |
+
\toprule
|
| 167 |
+
\textbf{Model} & \textbf{Config} & \textbf{Accuracy} & \textbf{MCC} & \textbf{Halluc.\ Rate} \\
|
| 168 |
+
\midrule
|
| 169 |
+
Claude Haiku-4.5 & 3-shot & 0.739$\pm$0.019 & 0.502$\pm$0.014 & 1.000 \\
|
| 170 |
+
Claude Haiku-4.5 & zero-shot & 0.750 & 0.514 & 1.000 \\
|
| 171 |
+
Gemini-2.5-Flash & 3-shot & 0.777$\pm$0.011 & 0.563$\pm$0.018 & 1.000 \\
|
| 172 |
+
Gemini-2.5-Flash & zero-shot & 0.748 & 0.496 & 1.000 \\
|
| 173 |
+
GPT-4o-mini & 3-shot & 0.738$\pm$0.008 & 0.485$\pm$0.007 & 1.000 \\
|
| 174 |
+
GPT-4o-mini & zero-shot & 0.744 & 0.491 & 1.000 \\
|
| 175 |
+
Llama-3.3-70B & 3-shot & 0.739$\pm$0.023 & 0.504$\pm$0.036 & 1.000 \\
|
| 176 |
+
Llama-3.3-70B & zero-shot & 0.635 & 0.364 & 1.000 \\
|
| 177 |
+
Qwen2.5-32B & 3-shot & 0.724$\pm$0.017 & 0.484$\pm$0.018 & 1.000 \\
|
| 178 |
+
Qwen2.5-32B & zero-shot & 0.757 & 0.519 & 1.000 \\
|
| 179 |
+
\bottomrule
|
| 180 |
+
\end{tabular}
|
| 181 |
+
\end{table}
|
| 182 |
+
|
| 183 |
+
\clearpage
|
| 184 |
+
|
| 185 |
+
\subsection{PPI LLM Results (80 runs)}
|
| 186 |
+
|
| 187 |
+
PPI models: Claude Haiku-4.5, Gemini-2.5-Flash, GPT-4o-mini, Llama-3.3-70B, Qwen2.5-32B.
|
| 188 |
+
|
| 189 |
+
\begin{table}[h]
|
| 190 |
+
\centering
|
| 191 |
+
\caption{PPI-L1 (MCQ, 4-way evidence quality classification). All 3-shot models achieve $\geq$0.999 accuracy except Qwen2.5-32B. Zero-shot performance drops to 0.75 due to complete failure on direct\_experimental evidence (Class A: 0.0 accuracy), while scoring $\approx$1.0 on Classes B--D.}
|
| 192 |
+
\label{tab:ppi_l1_full}
|
| 193 |
+
\scriptsize
|
| 194 |
+
\begin{tabular}{@{}llccc@{}}
|
| 195 |
+
\toprule
|
| 196 |
+
\textbf{Model} & \textbf{Config} & \textbf{Accuracy} & \textbf{Macro-F1} & \textbf{MCC} \\
|
| 197 |
+
\midrule
|
| 198 |
+
Claude Haiku-4.5 & 3-shot & 0.999$\pm$0.001 & 0.999$\pm$0.001 & 0.999$\pm$0.001 \\
|
| 199 |
+
Claude Haiku-4.5 & zero-shot & 0.750 & 0.667 & 0.730 \\
|
| 200 |
+
Gemini-2.5-Flash & 3-shot & 1.000$\pm$0.001 & 1.000$\pm$0.001 & 0.999$\pm$0.001 \\
|
| 201 |
+
Gemini-2.5-Flash & zero-shot & 0.750 & 0.667 & 0.730 \\
|
| 202 |
+
GPT-4o-mini & 3-shot & 1.000$\pm$0.001 & 1.000$\pm$0.001 & 0.999$\pm$0.001 \\
|
| 203 |
+
GPT-4o-mini & zero-shot & 0.749 & 0.665 & 0.728 \\
|
| 204 |
+
Llama-3.3-70B & 3-shot & 1.000 & 1.000 & 1.000 \\
|
| 205 |
+
Llama-3.3-70B & zero-shot & 0.750 & 0.667 & 0.730 \\
|
| 206 |
+
Qwen2.5-32B & 3-shot & 0.826$\pm$0.069 & 0.792$\pm$0.101 & 0.803$\pm$0.069 \\
|
| 207 |
+
Qwen2.5-32B & zero-shot & 0.750 & 0.667 & 0.730 \\
|
| 208 |
+
\bottomrule
|
| 209 |
+
\end{tabular}
|
| 210 |
+
\end{table}
|
| 211 |
+
|
| 212 |
+
\begin{table}[h]
|
| 213 |
+
\centering
|
| 214 |
+
\caption{PPI-L2 (extraction, protein pair identification). Near-perfect entity and count extraction; method and interaction strength require explicit evidence (zero-shot method accuracy is 0.000 for all models).}
|
| 215 |
+
\label{tab:ppi_l2_full}
|
| 216 |
+
\scriptsize
|
| 217 |
+
\begin{tabular}{@{}llccccc@{}}
|
| 218 |
+
\toprule
|
| 219 |
+
\textbf{Model} & \textbf{Config} & \textbf{Entity F1} & \textbf{Count Acc} & \textbf{Schema Compl.} & \textbf{Method Acc} & \textbf{Strength Acc} \\
|
| 220 |
+
\midrule
|
| 221 |
+
Claude Haiku-4.5 & 3-shot & 1.000 & 1.000 & 1.000 & 0.088$\pm$0.139 & 0.522$\pm$0.151 \\
|
| 222 |
+
Claude Haiku-4.5 & zero-shot & 1.000 & 1.000 & 1.000 & 0.000 & 0.594 \\
|
| 223 |
+
Gemini-2.5-Flash & 3-shot & 1.000 & 1.000 & 1.000 & 1.000 & 0.399$\pm$0.219 \\
|
| 224 |
+
Gemini-2.5-Flash & zero-shot & 0.952 & 1.000 & 0.902 & 0.000 & 0.554 \\
|
| 225 |
+
GPT-4o-mini & 3-shot & 0.999$\pm$0.001 & 1.000 & 1.000 & 0.937$\pm$0.108 & 0.425$\pm$0.182 \\
|
| 226 |
+
GPT-4o-mini & zero-shot & 0.999 & 1.000 & 1.000 & 0.000 & 0.307 \\
|
| 227 |
+
Llama-3.3-70B & 3-shot & 1.000 & 1.000 & 1.000 & 0.082$\pm$0.143 & 0.608$\pm$0.019 \\
|
| 228 |
+
Llama-3.3-70B & zero-shot & 1.000 & 1.000 & 1.000 & 0.000 & 0.572 \\
|
| 229 |
+
Qwen2.5-32B & 3-shot & 0.998$\pm$0.001 & 1.000 & 1.000 & 0.938$\pm$0.108 & 0.501$\pm$0.166 \\
|
| 230 |
+
Qwen2.5-32B & zero-shot & 0.999 & 1.000 & 1.000 & 0.000 & 0.510 \\
|
| 231 |
+
\bottomrule
|
| 232 |
+
\end{tabular}
|
| 233 |
+
\end{table}
|
| 234 |
+
|
| 235 |
+
\begin{table}[h]
|
| 236 |
+
\centering
|
| 237 |
+
\caption{PPI-L3 (reasoning, LLM-as-judge overall score, 1--5 scale). Llama-3.3-70B 3-shot has 51.5\% error rate; score shown for successful completions only ($\dagger$).}
|
| 238 |
+
\label{tab:ppi_l3_full}
|
| 239 |
+
\scriptsize
|
| 240 |
+
\begin{tabular}{@{}llc@{}}
|
| 241 |
+
\toprule
|
| 242 |
+
\textbf{Model} & \textbf{Config} & \textbf{Overall Score} \\
|
| 243 |
+
\midrule
|
| 244 |
+
Claude Haiku-4.5 & 3-shot & 3.70$\pm$0.25 \\
|
| 245 |
+
Claude Haiku-4.5 & zero-shot & 4.683 \\
|
| 246 |
+
Gemini-2.5-Flash & 3-shot & 3.11$\pm$0.10 \\
|
| 247 |
+
Gemini-2.5-Flash & zero-shot & 4.645 \\
|
| 248 |
+
GPT-4o-mini & 3-shot & 3.21$\pm$0.10 \\
|
| 249 |
+
GPT-4o-mini & zero-shot & 4.361 \\
|
| 250 |
+
Llama-3.3-70B & 3-shot & 2.05$\pm$0.92$^\dagger$ \\
|
| 251 |
+
Llama-3.3-70B & zero-shot & 4.281 \\
|
| 252 |
+
Qwen2.5-32B & 3-shot & 3.51$\pm$0.04 \\
|
| 253 |
+
Qwen2.5-32B & zero-shot & 4.452 \\
|
| 254 |
+
\bottomrule
|
| 255 |
+
\end{tabular}
|
| 256 |
+
\end{table}
|
| 257 |
+
|
| 258 |
+
\begin{table}[h]
|
| 259 |
+
\centering
|
| 260 |
+
\caption{PPI-L4 (discrimination, tested vs.\ untested protein pairs). Cit.\ Rate = fraction of responses including any evidence citation (all cited evidence is fabricated; hallucination rate = 100\%).}
|
| 261 |
+
\label{tab:ppi_l4_full}
|
| 262 |
+
\scriptsize
|
| 263 |
+
\begin{tabular}{@{}llccc@{}}
|
| 264 |
+
\toprule
|
| 265 |
+
\textbf{Model} & \textbf{Config} & \textbf{Accuracy} & \textbf{MCC} & \textbf{Cit.\ Rate} \\
|
| 266 |
+
\midrule
|
| 267 |
+
Claude Haiku-4.5 & 3-shot & 0.648$\pm$0.010 & 0.390$\pm$0.020 & 1.000 \\
|
| 268 |
+
Claude Haiku-4.5 & zero-shot & 0.608 & 0.334 & 1.000 \\
|
| 269 |
+
Gemini-2.5-Flash & 3-shot & 0.671$\pm$0.006 & 0.382$\pm$0.004 & 1.000 \\
|
| 270 |
+
Gemini-2.5-Flash & zero-shot & 0.647 & 0.358 & 1.000 \\
|
| 271 |
+
GPT-4o-mini & 3-shot & 0.633$\pm$0.025 & 0.352$\pm$0.039 & 0.888$\pm$0.036 \\
|
| 272 |
+
GPT-4o-mini & zero-shot & 0.699 & 0.430 & 1.000 \\
|
| 273 |
+
Llama-3.3-70B & 3-shot & 0.637$\pm$0.046 & 0.371$\pm$0.056 & 0.978$\pm$0.001 \\
|
| 274 |
+
Llama-3.3-70B & zero-shot & 0.703 & 0.441 & 1.000 \\
|
| 275 |
+
Qwen2.5-32B & 3-shot & 0.641$\pm$0.010 & 0.369$\pm$0.009 & 0.467$\pm$0.032 \\
|
| 276 |
+
Qwen2.5-32B & zero-shot & 0.645 & 0.366 & 1.000 \\
|
| 277 |
+
\bottomrule
|
| 278 |
+
\end{tabular}
|
| 279 |
+
\end{table}
|
paper/appendix/app_ml_tables.tex
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{Complete ML Results}
|
| 2 |
+
\label{app:ml_tables}
|
| 3 |
+
|
| 4 |
+
This appendix presents complete per-run ML results for all 180 experiments across three domains. DTI uses a single seed (42); CT and PPI use three seeds (42, 43, 44). PPI results are reported as mean $\pm$ std across seeds.
|
| 5 |
+
|
| 6 |
+
\subsection{DTI ML Results (18 runs)}
|
| 7 |
+
|
| 8 |
+
\begin{table}[h]
|
| 9 |
+
\centering
|
| 10 |
+
\caption{DTI ML results: 3 models $\times$ 6 configurations, seed 42.}
|
| 11 |
+
\label{tab:dti_ml_full}
|
| 12 |
+
\scriptsize
|
| 13 |
+
\begin{tabular}{@{}lllcccc@{}}
|
| 14 |
+
\toprule
|
| 15 |
+
\textbf{Model} & \textbf{Split} & \textbf{Negatives} & \textbf{LogAUC} & \textbf{AUPRC} & \textbf{MCC} & \textbf{AUROC} \\
|
| 16 |
+
\midrule
|
| 17 |
+
DeepDTA & random & negbiodb & 0.833 & 0.997 & 0.976 & 0.997 \\
|
| 18 |
+
DeepDTA & random & uniform\_random & 0.824 & 0.995 & 0.939 & 0.994 \\
|
| 19 |
+
DeepDTA & random & degree\_matched & 0.919 & 0.998 & 0.980 & 0.998 \\
|
| 20 |
+
DeepDTA & cold\_compound & negbiodb & 0.792 & 0.995 & 0.975 & 0.996 \\
|
| 21 |
+
DeepDTA & cold\_target & negbiodb & 0.325 & 0.901 & 0.041 & 0.887 \\
|
| 22 |
+
DeepDTA & ddb & negbiodb & 0.824 & 0.996 & 0.975 & 0.997 \\
|
| 23 |
+
\midrule
|
| 24 |
+
GraphDTA & random & negbiodb & 0.843 & 0.997 & 0.977 & 0.997 \\
|
| 25 |
+
GraphDTA & random & uniform\_random & 0.888 & 0.996 & 0.947 & 0.996 \\
|
| 26 |
+
GraphDTA & random & degree\_matched & 0.967 & 0.999 & 0.981 & 0.999 \\
|
| 27 |
+
GraphDTA & cold\_compound & negbiodb & 0.823 & 0.996 & 0.976 & 0.997 \\
|
| 28 |
+
GraphDTA & cold\_target & negbiodb & 0.241 & 0.871 & 0.098 & 0.863 \\
|
| 29 |
+
GraphDTA & ddb & negbiodb & 0.840 & 0.997 & 0.977 & 0.997 \\
|
| 30 |
+
\midrule
|
| 31 |
+
DrugBAN & random & negbiodb & 0.830 & 0.996 & 0.975 & 0.997 \\
|
| 32 |
+
DrugBAN & random & uniform\_random & 0.825 & 0.995 & 0.933 & 0.994 \\
|
| 33 |
+
DrugBAN & random & degree\_matched & 0.955 & 0.999 & 0.980 & 0.999 \\
|
| 34 |
+
DrugBAN & cold\_compound & negbiodb & 0.828 & 0.996 & 0.976 & 0.997 \\
|
| 35 |
+
DrugBAN & cold\_target & negbiodb & 0.151 & 0.782 & 0.186 & 0.760 \\
|
| 36 |
+
DrugBAN & ddb & negbiodb & 0.828 & 0.996 & 0.975 & 0.997 \\
|
| 37 |
+
\bottomrule
|
| 38 |
+
\end{tabular}
|
| 39 |
+
\end{table}
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
\subsection{CT-M1 Results (54 runs)}
|
| 43 |
+
|
| 44 |
+
\begin{table}[h]
|
| 45 |
+
\centering
|
| 46 |
+
\caption{CT-M1 binary classification results: 3 models $\times$ 6 splits $\times$ 3 seeds. Temporal split produces single-class validation sets (all negative), yielding undefined metrics (---).}
|
| 47 |
+
\label{tab:ct_m1_full}
|
| 48 |
+
\scriptsize
|
| 49 |
+
\begin{tabular}{@{}lllrccccc@{}}
|
| 50 |
+
\toprule
|
| 51 |
+
\textbf{Model} & \textbf{Split} & \textbf{Neg.} & \textbf{Seed} & \textbf{AUROC} & \textbf{AUPRC} & \textbf{MCC} & \textbf{LogAUC} & \textbf{F1} \\
|
| 52 |
+
\midrule
|
| 53 |
+
\multirow{18}{*}{GNN}
|
| 54 |
+
& cold\_cond & negbiodb & 42 & 1.000 & 1.000 & 0.994 & 0.998 & 0.997 \\
|
| 55 |
+
& cold\_cond & negbiodb & 43 & 1.000 & 1.000 & 0.990 & 0.991 & 0.995 \\
|
| 56 |
+
& cold\_cond & negbiodb & 44 & 1.000 & 1.000 & 0.990 & 0.989 & 0.995 \\
|
| 57 |
+
& cold\_drug & negbiodb & 42 & 1.000 & 1.000 & 0.993 & 0.999 & 0.997 \\
|
| 58 |
+
& cold\_drug & negbiodb & 43 & 1.000 & 1.000 & 0.993 & 0.999 & 0.997 \\
|
| 59 |
+
& cold\_drug & negbiodb & 44 & 1.000 & 1.000 & 0.991 & 0.999 & 0.995 \\
|
| 60 |
+
& random & deg\_match & 42 & 0.724 & 0.724 & 0.393 & 0.170 & 0.636 \\
|
| 61 |
+
& random & deg\_match & 43 & 0.768 & 0.737 & 0.454 & 0.137 & 0.639 \\
|
| 62 |
+
& random & deg\_match & 44 & 0.781 & 0.741 & 0.473 & 0.132 & 0.657 \\
|
| 63 |
+
& random & negbiodb & 42 & 1.000 & 1.000 & 1.000 & 1.000 & 1.000 \\
|
| 64 |
+
& random & negbiodb & 43 & 1.000 & 1.000 & 1.000 & 1.000 & 1.000 \\
|
| 65 |
+
& random & negbiodb & 44 & 1.000 & 1.000 & 1.000 & 1.000 & 1.000 \\
|
| 66 |
+
& random & unif\_rand & 42 & 0.891 & 0.914 & 0.609 & 0.305 & 0.841 \\
|
| 67 |
+
& random & unif\_rand & 43 & 0.899 & 0.922 & 0.638 & 0.331 & 0.851 \\
|
| 68 |
+
& random & unif\_rand & 44 & 0.893 & 0.922 & 0.599 & 0.373 & 0.835 \\
|
| 69 |
+
& temporal & negbiodb & 42 & --- & --- & --- & --- & --- \\
|
| 70 |
+
& temporal & negbiodb & 43 & --- & --- & --- & --- & --- \\
|
| 71 |
+
& temporal & negbiodb & 44 & --- & --- & --- & --- & --- \\
|
| 72 |
+
\midrule
|
| 73 |
+
\multirow{18}{*}{MLP}
|
| 74 |
+
& cold\_cond & negbiodb & 42 & 1.000 & 1.000 & 0.995 & 0.989 & 0.997 \\
|
| 75 |
+
& cold\_cond & negbiodb & 43 & 0.999 & 0.999 & 0.990 & 0.980 & 0.995 \\
|
| 76 |
+
& cold\_cond & negbiodb & 44 & 1.000 & 1.000 & 0.984 & 0.991 & 0.992 \\
|
| 77 |
+
& cold\_drug & negbiodb & 42 & 1.000 & 0.999 & 0.991 & 0.995 & 0.996 \\
|
| 78 |
+
& cold\_drug & negbiodb & 43 & 1.000 & 0.999 & 0.996 & 0.993 & 0.998 \\
|
| 79 |
+
& cold\_drug & negbiodb & 44 & 1.000 & 0.999 & 0.996 & 0.997 & 0.998 \\
|
| 80 |
+
& random & deg\_match & 42 & 0.799 & 0.794 & 0.447 & 0.179 & 0.729 \\
|
| 81 |
+
& random & deg\_match & 43 & 0.803 & 0.802 & 0.462 & 0.195 & 0.728 \\
|
| 82 |
+
& random & deg\_match & 44 & 0.802 & 0.800 & 0.454 & 0.189 & 0.731 \\
|
| 83 |
+
& random & negbiodb & 42 & 1.000 & 1.000 & 0.994 & 0.990 & 0.999 \\
|
| 84 |
+
& random & negbiodb & 43 & 1.000 & 1.000 & 0.994 & 0.993 & 0.999 \\
|
| 85 |
+
& random & negbiodb & 44 & 1.000 & 1.000 & 0.988 & 0.996 & 0.998 \\
|
| 86 |
+
& random & unif\_rand & 42 & 0.884 & 0.886 & 0.597 & 0.343 & 0.796 \\
|
| 87 |
+
& random & unif\_rand & 43 & 0.888 & 0.889 & 0.609 & 0.366 & 0.807 \\
|
| 88 |
+
& random & unif\_rand & 44 & 0.884 & 0.888 & 0.592 & 0.387 & 0.786 \\
|
| 89 |
+
& temporal & negbiodb & 42 & --- & --- & --- & --- & --- \\
|
| 90 |
+
& temporal & negbiodb & 43 & --- & --- & --- & --- & --- \\
|
| 91 |
+
& temporal & negbiodb & 44 & --- & --- & --- & --- & --- \\
|
| 92 |
+
\midrule
|
| 93 |
+
\multirow{18}{*}{XGBoost}
|
| 94 |
+
& cold\_cond & negbiodb & 42 & 1.000 & 1.000 & 1.000 & 1.000 & 1.000 \\
|
| 95 |
+
& cold\_cond & negbiodb & 43 & 1.000 & 1.000 & 1.000 & 1.000 & 1.000 \\
|
| 96 |
+
& cold\_cond & negbiodb & 44 & 1.000 & 1.000 & 1.000 & 1.000 & 1.000 \\
|
| 97 |
+
& cold\_drug & negbiodb & 42 & 1.000 & 1.000 & 0.999 & 1.000 & 1.000 \\
|
| 98 |
+
& cold\_drug & negbiodb & 43 & 1.000 & 1.000 & 0.999 & 1.000 & 1.000 \\
|
| 99 |
+
& cold\_drug & negbiodb & 44 & 1.000 & 1.000 & 0.999 & 1.000 & 1.000 \\
|
| 100 |
+
& random & deg\_match & 42 & 0.844 & 0.846 & 0.553 & 0.260 & 0.772 \\
|
| 101 |
+
& random & deg\_match & 43 & 0.844 & 0.846 & 0.553 & 0.260 & 0.772 \\
|
| 102 |
+
& random & deg\_match & 44 & 0.844 & 0.846 & 0.553 & 0.260 & 0.772 \\
|
| 103 |
+
& random & negbiodb & 42 & 1.000 & 1.000 & 1.000 & 1.000 & 1.000 \\
|
| 104 |
+
& random & negbiodb & 43 & 1.000 & 1.000 & 1.000 & 1.000 & 1.000 \\
|
| 105 |
+
& random & negbiodb & 44 & 1.000 & 1.000 & 1.000 & 1.000 & 1.000 \\
|
| 106 |
+
& random & unif\_rand & 42 & 0.905 & 0.907 & 0.643 & 0.423 & 0.821 \\
|
| 107 |
+
& random & unif\_rand & 43 & 0.905 & 0.907 & 0.643 & 0.423 & 0.821 \\
|
| 108 |
+
& random & unif\_rand & 44 & 0.905 & 0.907 & 0.643 & 0.423 & 0.821 \\
|
| 109 |
+
& temporal & negbiodb & 42 & --- & --- & --- & --- & --- \\
|
| 110 |
+
& temporal & negbiodb & 43 & --- & --- & --- & --- & --- \\
|
| 111 |
+
& temporal & negbiodb & 44 & --- & --- & --- & --- & --- \\
|
| 112 |
+
\bottomrule
|
| 113 |
+
\end{tabular}
|
| 114 |
+
\end{table}
|
| 115 |
+
|
| 116 |
+
\clearpage
|
| 117 |
+
|
| 118 |
+
\subsection{CT-M2 Results (54 runs)}
|
| 119 |
+
|
| 120 |
+
\begin{table}[h]
|
| 121 |
+
\centering
|
| 122 |
+
\caption{CT-M2 seven-way failure category prediction: 3 models $\times$ 6 splits $\times$ 3 seeds. XGBoost results are deterministic across seeds.}
|
| 123 |
+
\label{tab:ct_m2_full}
|
| 124 |
+
\scriptsize
|
| 125 |
+
\begin{tabular}{@{}llrcccc@{}}
|
| 126 |
+
\toprule
|
| 127 |
+
\textbf{Model} & \textbf{Split} & \textbf{Seed} & \textbf{Macro-F1} & \textbf{Wtd-F1} & \textbf{MCC} & \textbf{Acc} \\
|
| 128 |
+
\midrule
|
| 129 |
+
\multirow{18}{*}{GNN}
|
| 130 |
+
& cold\_condition & 42 & 0.379 & 0.640 & 0.475 & 0.610 \\
|
| 131 |
+
& cold\_condition & 43 & 0.373 & 0.633 & 0.464 & 0.602 \\
|
| 132 |
+
& cold\_condition & 44 & 0.377 & 0.629 & 0.460 & 0.596 \\
|
| 133 |
+
& cold\_drug & 42 & 0.232 & 0.588 & 0.374 & 0.559 \\
|
| 134 |
+
& cold\_drug & 43 & 0.229 & 0.583 & 0.372 & 0.561 \\
|
| 135 |
+
& cold\_drug & 44 & 0.245 & 0.547 & 0.337 & 0.490 \\
|
| 136 |
+
& degree\_balanced & 42 & 0.458 & 0.684 & 0.538 & 0.661 \\
|
| 137 |
+
& degree\_balanced & 43 & 0.471 & 0.693 & 0.552 & 0.674 \\
|
| 138 |
+
& degree\_balanced & 44 & 0.453 & 0.673 & 0.526 & 0.651 \\
|
| 139 |
+
& random & 42 & 0.459 & 0.669 & 0.520 & 0.643 \\
|
| 140 |
+
& random & 43 & 0.476 & 0.674 & 0.530 & 0.653 \\
|
| 141 |
+
& random & 44 & 0.469 & 0.674 & 0.529 & 0.657 \\
|
| 142 |
+
& scaffold & 42 & 0.183 & 0.496 & 0.240 & 0.439 \\
|
| 143 |
+
& scaffold & 43 & 0.207 & 0.547 & 0.305 & 0.532 \\
|
| 144 |
+
& scaffold & 44 & 0.185 & 0.528 & 0.267 & 0.521 \\
|
| 145 |
+
& temporal & 42 & 0.245 & 0.608 & 0.403 & 0.572 \\
|
| 146 |
+
& temporal & 43 & 0.225 & 0.573 & 0.347 & 0.514 \\
|
| 147 |
+
& temporal & 44 & 0.228 & 0.566 & 0.350 & 0.515 \\
|
| 148 |
+
\midrule
|
| 149 |
+
\multirow{18}{*}{MLP}
|
| 150 |
+
& cold\_condition & 42 & 0.271 & 0.577 & 0.369 & 0.520 \\
|
| 151 |
+
& cold\_condition & 43 & 0.270 & 0.582 & 0.374 & 0.528 \\
|
| 152 |
+
& cold\_condition & 44 & 0.266 & 0.576 & 0.369 & 0.520 \\
|
| 153 |
+
& cold\_drug & 42 & 0.276 & 0.546 & 0.339 & 0.496 \\
|
| 154 |
+
& cold\_drug & 43 & 0.287 & 0.551 & 0.344 & 0.511 \\
|
| 155 |
+
& cold\_drug & 44 & 0.243 & 0.495 & 0.292 & 0.435 \\
|
| 156 |
+
& degree\_balanced & 42 & 0.362 & 0.630 & 0.445 & 0.586 \\
|
| 157 |
+
& degree\_balanced & 43 & 0.354 & 0.622 & 0.435 & 0.579 \\
|
| 158 |
+
& degree\_balanced & 44 & 0.347 & 0.615 & 0.423 & 0.568 \\
|
| 159 |
+
& random & 42 & 0.347 & 0.611 & 0.418 & 0.569 \\
|
| 160 |
+
& random & 43 & 0.368 & 0.630 & 0.447 & 0.594 \\
|
| 161 |
+
& random & 44 & 0.358 & 0.617 & 0.430 & 0.573 \\
|
| 162 |
+
& scaffold & 42 & 0.204 & 0.531 & 0.266 & 0.517 \\
|
| 163 |
+
& scaffold & 43 & 0.186 & 0.498 & 0.226 & 0.482 \\
|
| 164 |
+
& scaffold & 44 & 0.198 & 0.530 & 0.270 & 0.542 \\
|
| 165 |
+
& temporal & 42 & 0.202 & 0.517 & 0.284 & 0.454 \\
|
| 166 |
+
& temporal & 43 & 0.210 & 0.553 & 0.311 & 0.498 \\
|
| 167 |
+
& temporal & 44 & 0.212 & 0.544 & 0.304 & 0.486 \\
|
| 168 |
+
\midrule
|
| 169 |
+
\multirow{6}{*}{XGBoost}
|
| 170 |
+
& cold\_condition & 42--44 & 0.338 & 0.686 & 0.570 & 0.725 \\
|
| 171 |
+
& cold\_drug & 42--44 & 0.414 & 0.683 & 0.555 & 0.715 \\
|
| 172 |
+
& degree\_balanced & 42--44 & 0.521 & 0.758 & 0.645 & 0.776 \\
|
| 173 |
+
& random & 42--44 & 0.510 & 0.751 & 0.637 & 0.771 \\
|
| 174 |
+
& scaffold & 42--44 & 0.193 & 0.567 & 0.374 & 0.640 \\
|
| 175 |
+
& temporal & 42--44 & 0.193 & 0.602 & 0.454 & 0.669 \\
|
| 176 |
+
\bottomrule
|
| 177 |
+
\end{tabular}
|
| 178 |
+
\end{table}
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
\subsection{PPI ML Results (54 runs, aggregated)}
|
| 182 |
+
|
| 183 |
+
\begin{table}[h]
|
| 184 |
+
\centering
|
| 185 |
+
\caption{PPI ML results: 3 models $\times$ 6 configurations, mean $\pm$ std over 3 seeds.}
|
| 186 |
+
\label{tab:ppi_ml_full}
|
| 187 |
+
\scriptsize
|
| 188 |
+
\begin{tabular}{@{}lllcccc@{}}
|
| 189 |
+
\toprule
|
| 190 |
+
\textbf{Model} & \textbf{Split} & \textbf{Negatives} & \textbf{LogAUC} & \textbf{AUPRC} & \textbf{MCC} & \textbf{AUROC} \\
|
| 191 |
+
\midrule
|
| 192 |
+
SiameseCNN & random & negbiodb & .517$\pm$.018 & .961$\pm$.001 & .794$\pm$.012 & .963$\pm$.000 \\
|
| 193 |
+
SiameseCNN & random & unif\_rand & .552$\pm$.002 & .964$\pm$.001 & .806$\pm$.007 & .965$\pm$.001 \\
|
| 194 |
+
SiameseCNN & random & deg\_match & .548$\pm$.011 & .963$\pm$.001 & .803$\pm$.005 & .964$\pm$.001 \\
|
| 195 |
+
SiameseCNN & cold\_protein & negbiodb & .314$\pm$.014 & .880$\pm$.003 & .568$\pm$.019 & .873$\pm$.002 \\
|
| 196 |
+
SiameseCNN & cold\_both & negbiodb & .037$\pm$.010 & .702$\pm$.031 & .070$\pm$.004 & .585$\pm$.040 \\
|
| 197 |
+
SiameseCNN & ddb & negbiodb & .534$\pm$.011 & .961$\pm$.001 & .795$\pm$.004 & .962$\pm$.001 \\
|
| 198 |
+
\midrule
|
| 199 |
+
PIPR & random & negbiodb & .519$\pm$.009 & .962$\pm$.000 & .812$\pm$.006 & .964$\pm$.001 \\
|
| 200 |
+
PIPR & random & unif\_rand & .565$\pm$.005 & .966$\pm$.000 & .810$\pm$.002 & .966$\pm$.000 \\
|
| 201 |
+
PIPR & random & deg\_match & .550$\pm$.009 & .965$\pm$.001 & .817$\pm$.006 & .966$\pm$.001 \\
|
| 202 |
+
PIPR & cold\_protein & negbiodb & .288$\pm$.010 & .869$\pm$.006 & .565$\pm$.019 & .859$\pm$.008 \\
|
| 203 |
+
PIPR & cold\_both & negbiodb & .031$\pm$.019 & .610$\pm$.055 & $-$.018$\pm$.044 & .409$\pm$.077 \\
|
| 204 |
+
PIPR & ddb & negbiodb & .537$\pm$.009 & .962$\pm$.000 & .808$\pm$.003 & .964$\pm$.000 \\
|
| 205 |
+
\midrule
|
| 206 |
+
MLPFeatures & random & negbiodb & .567$\pm$.005 & .962$\pm$.001 & .788$\pm$.003 & .962$\pm$.001 \\
|
| 207 |
+
MLPFeatures & random & unif\_rand & .539$\pm$.175 & .949$\pm$.043 & .766$\pm$.118 & .948$\pm$.044 \\
|
| 208 |
+
MLPFeatures & random & deg\_match & .458$\pm$.059 & .934$\pm$.013 & .716$\pm$.033 & .930$\pm$.012 \\
|
| 209 |
+
MLPFeatures & cold\_protein & negbiodb & .476$\pm$.005 & .935$\pm$.001 & .706$\pm$.005 & .931$\pm$.001 \\
|
| 210 |
+
MLPFeatures & cold\_both & negbiodb & .595$\pm$.051 & .973$\pm$.010 & .749$\pm$.043 & .950$\pm$.021 \\
|
| 211 |
+
MLPFeatures & ddb & negbiodb & .564$\pm$.005 & .961$\pm$.000 & .787$\pm$.001 & .961$\pm$.000 \\
|
| 212 |
+
\bottomrule
|
| 213 |
+
\end{tabular}
|
| 214 |
+
\end{table}
|
paper/appendix/app_prompts.tex
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{LLM Prompt Examples}
|
| 2 |
+
\label{app:prompts}
|
| 3 |
+
|
| 4 |
+
This appendix shows representative prompt templates for each evaluation level across all three domains. Each domain uses a domain-specific system prompt and task-specific user prompts, and every task is evaluated in both zero-shot and 3-shot configurations.
|
| 5 |
+
|
| 6 |
+
\subsection{System Prompts}
|
| 7 |
+
|
| 8 |
+
\begin{small}
|
| 9 |
+
\begin{verbatim}
|
| 10 |
+
DTI: "You are a pharmaceutical scientist with expertise in drug-target
|
| 11 |
+
interactions, assay development, and medicinal chemistry. Provide precise,
|
| 12 |
+
evidence-based answers."
|
| 13 |
+
|
| 14 |
+
CT: "You are a clinical trial expert with deep knowledge of drug
|
| 15 |
+
development, regulatory science, and clinical pharmacology."
|
| 16 |
+
|
| 17 |
+
PPI: "You are a protein biochemist with expertise in protein-protein
|
| 18 |
+
interactions, structural biology, and proteomics experimental methods.
|
| 19 |
+
Provide precise, evidence-based answers."
|
| 20 |
+
\end{verbatim}
|
| 21 |
+
\end{small}
|
| 22 |
+
|
| 23 |
+
\subsection{L1: Multiple Choice Classification}
|
| 24 |
+
|
| 25 |
+
\textbf{DTI L1} (4-way: hard negative / conditional negative / methodological negative / dose-time negative):
|
| 26 |
+
\begin{small}
|
| 27 |
+
\begin{verbatim}
|
| 28 |
+
[context_text with evidence description]
|
| 29 |
+
|
| 30 |
+
Respond with ONLY the letter of the correct answer: A, B, C, or D.
|
| 31 |
+
\end{verbatim}
|
| 32 |
+
\end{small}
|
| 33 |
+
|
| 34 |
+
\textbf{CT L1} (5-way: safety / efficacy / enrollment / strategic / other):
|
| 35 |
+
\begin{small}
|
| 36 |
+
\begin{verbatim}
|
| 37 |
+
Based on the clinical trial information below, classify the primary
|
| 38 |
+
reason for this trial's failure.
|
| 39 |
+
|
| 40 |
+
[context_text with trial details]
|
| 41 |
+
|
| 42 |
+
Categories:
|
| 43 |
+
A) Safety -- Trial failed due to drug toxicity, adverse events, or
|
| 44 |
+
safety signals
|
| 45 |
+
B) Efficacy -- Trial failed to demonstrate therapeutic benefit vs control
|
| 46 |
+
C) Enrollment -- Trial failed to recruit sufficient participants
|
| 47 |
+
D) Strategic -- Trial was discontinued for business, strategic, or
|
| 48 |
+
portfolio reasons
|
| 49 |
+
E) Other -- Trial failed due to study design flaws, regulatory issues,
|
| 50 |
+
or other reasons
|
| 51 |
+
|
| 52 |
+
Respond with ONLY a single letter (A, B, C, D, or E).
|
| 53 |
+
\end{verbatim}
|
| 54 |
+
\end{small}
|
| 55 |
+
|
| 56 |
+
\textbf{PPI L1} (4-way: direct experimental / systematic screen / computational inference / database score absence):
|
| 57 |
+
\begin{small}
|
| 58 |
+
\begin{verbatim}
|
| 59 |
+
Based on the evidence description below, classify the type and quality
|
| 60 |
+
of evidence supporting this protein non-interaction.
|
| 61 |
+
|
| 62 |
+
[context_text with evidence description]
|
| 63 |
+
|
| 64 |
+
Categories:
|
| 65 |
+
A) Direct experimental -- A specific binding assay (co-IP, pulldown,
|
| 66 |
+
SPR, etc.) found no physical interaction
|
| 67 |
+
B) Systematic screen -- A high-throughput binary screen (Y2H, LUMIER,
|
| 68 |
+
etc.) found no interaction
|
| 69 |
+
C) Computational inference -- ML analysis of co-fractionation or complex
|
| 70 |
+
data predicts no interaction
|
| 71 |
+
D) Database score absence -- Zero or negligible combined interaction
|
| 72 |
+
score across multiple evidence channels
|
| 73 |
+
|
| 74 |
+
Respond with ONLY a single letter (A, B, C, or D).
|
| 75 |
+
\end{verbatim}
|
| 76 |
+
\end{small}
|
| 77 |
+
|
| 78 |
+
\subsection{L2: Structured Extraction}
|
| 79 |
+
|
| 80 |
+
\textbf{DTI L2} (JSON extraction from abstract):
|
| 81 |
+
\begin{small}
|
| 82 |
+
\begin{verbatim}
|
| 83 |
+
Extract all negative drug-target interaction results from the following
|
| 84 |
+
abstract.
|
| 85 |
+
|
| 86 |
+
Abstract: [abstract_text]
|
| 87 |
+
|
| 88 |
+
For each negative result found, extract:
|
| 89 |
+
- compound: compound/drug name
|
| 90 |
+
- target: target protein/gene name
|
| 91 |
+
- target_uniprot: UniProt accession (if determinable)
|
| 92 |
+
- activity_type: type of measurement (IC50, Ki, Kd, EC50, etc.)
|
| 93 |
+
- activity_value: reported value with units
|
| 94 |
+
- activity_relation: relation (=, >, <, ~)
|
| 95 |
+
- assay_format: biochemical, cell-based, or in vivo
|
| 96 |
+
- outcome: inactive, weak, or inconclusive
|
| 97 |
+
|
| 98 |
+
Also report:
|
| 99 |
+
- total_inactive_count: total number of inactive results mentioned
|
| 100 |
+
- positive_results_mentioned: true/false
|
| 101 |
+
|
| 102 |
+
Respond in JSON format.
|
| 103 |
+
\end{verbatim}
|
| 104 |
+
\end{small}
|
| 105 |
+
|
| 106 |
+
\textbf{CT L2} (JSON extraction from termination report):
|
| 107 |
+
\begin{small}
|
| 108 |
+
\begin{verbatim}
|
| 109 |
+
Extract structured failure information from the following clinical trial
|
| 110 |
+
termination report. Return a JSON object with the fields specified below.
|
| 111 |
+
|
| 112 |
+
[context_text]
|
| 113 |
+
|
| 114 |
+
Required JSON fields:
|
| 115 |
+
- failure_category: one of [efficacy, safety, pharmacokinetic,
|
| 116 |
+
enrollment, strategic, design, regulatory, other]
|
| 117 |
+
- failure_subcategory: specific reason
|
| 118 |
+
- affected_system: organ system affected (null if not applicable)
|
| 119 |
+
- severity_indicator: one of [mild, moderate, severe, fatal, null]
|
| 120 |
+
- quantitative_evidence: true if text mentions specific numbers
|
| 121 |
+
- decision_maker: who terminated [sponsor, dsmb, regulatory,
|
| 122 |
+
investigator, null]
|
| 123 |
+
- patient_impact: brief description of patient safety impact
|
| 124 |
+
|
| 125 |
+
Return ONLY valid JSON, no additional text.
|
| 126 |
+
\end{verbatim}
|
| 127 |
+
\end{small}
|
| 128 |
+
|
| 129 |
+
\textbf{PPI L2} (JSON extraction of non-interacting pairs):
|
| 130 |
+
\begin{small}
|
| 131 |
+
\begin{verbatim}
|
| 132 |
+
Extract all protein pairs reported as non-interacting from the following
|
| 133 |
+
evidence summary. Return a JSON object with the fields specified below.
|
| 134 |
+
|
| 135 |
+
[context_text]
|
| 136 |
+
|
| 137 |
+
Required JSON fields:
|
| 138 |
+
- non_interacting_pairs: list of objects, each with:
|
| 139 |
+
- protein_1: gene symbol or UniProt accession
|
| 140 |
+
- protein_2: gene symbol or UniProt accession
|
| 141 |
+
- method: experimental method used
|
| 142 |
+
- evidence_strength: one of [strong, moderate, weak]
|
| 143 |
+
- total_negative_count: total number of non-interacting pairs mentioned
|
| 144 |
+
- positive_interactions_mentioned: true if any positive interactions
|
| 145 |
+
are also mentioned
|
| 146 |
+
|
| 147 |
+
Return ONLY valid JSON, no additional text.
|
| 148 |
+
\end{verbatim}
|
| 149 |
+
\end{small}
|
| 150 |
+
|
| 151 |
+
\subsection{L3: Scientific Reasoning}
|
| 152 |
+
|
| 153 |
+
\textbf{DTI L3}:
|
| 154 |
+
\begin{small}
|
| 155 |
+
\begin{verbatim}
|
| 156 |
+
[context_text describing compound-target pair and inactivity evidence]
|
| 157 |
+
|
| 158 |
+
Provide a detailed scientific explanation (3-5 paragraphs) covering:
|
| 159 |
+
1. Structural compatibility between compound and target binding site
|
| 160 |
+
2. Known selectivity profile and mechanism of action
|
| 161 |
+
3. Relevant SAR (structure-activity relationship) data
|
| 162 |
+
4. Pharmacological context and therapeutic implications
|
| 163 |
+
\end{verbatim}
|
| 164 |
+
\end{small}
|
| 165 |
+
|
| 166 |
+
\textbf{CT L3}:
|
| 167 |
+
\begin{small}
|
| 168 |
+
\begin{verbatim}
|
| 169 |
+
The following clinical trial was confirmed as a FAILURE. Based on the
|
| 170 |
+
trial data below, provide a scientific explanation for why this drug
|
| 171 |
+
failed in this clinical trial.
|
| 172 |
+
|
| 173 |
+
[context_text]
|
| 174 |
+
|
| 175 |
+
Your explanation should address:
|
| 176 |
+
1. Mechanism -- What is the drug's mechanism of action and why might
|
| 177 |
+
it be insufficient for this condition?
|
| 178 |
+
2. Evidence interpretation -- What do the statistical results tell us?
|
| 179 |
+
3. Clinical factors -- Trial design, patient population, or disease
|
| 180 |
+
biology factors?
|
| 181 |
+
4. Broader context -- Known challenges of treating this condition?
|
| 182 |
+
|
| 183 |
+
Provide a thorough explanation in 3-5 paragraphs.
|
| 184 |
+
\end{verbatim}
|
| 185 |
+
\end{small}
|
| 186 |
+
|
| 187 |
+
\textbf{PPI L3}:
|
| 188 |
+
\begin{small}
|
| 189 |
+
\begin{verbatim}
|
| 190 |
+
The following two proteins have been experimentally tested and confirmed
|
| 191 |
+
to NOT physically interact. Based on the protein information below,
|
| 192 |
+
provide a scientific explanation for why they are unlikely to form a
|
| 193 |
+
physical interaction.
|
| 194 |
+
|
| 195 |
+
[context_text]
|
| 196 |
+
|
| 197 |
+
Your explanation should address:
|
| 198 |
+
1. Biological plausibility -- Are there biological reasons (function,
|
| 199 |
+
pathway, localization) that make interaction unlikely?
|
| 200 |
+
2. Structural reasoning -- Do domain architectures, binding interfaces,
|
| 201 |
+
or steric factors argue against interaction?
|
| 202 |
+
3. Mechanistic completeness -- Are multiple relevant factors considered?
|
| 203 |
+
4. Specificity -- Are claims specific to these proteins or generic?
|
| 204 |
+
|
| 205 |
+
Provide a thorough explanation in 3-5 paragraphs.
|
| 206 |
+
\end{verbatim}
|
| 207 |
+
\end{small}
|
| 208 |
+
|
| 209 |
+
\subsection{L4: Tested vs.\ Untested Discrimination}
|
| 210 |
+
|
| 211 |
+
All three domains use a similar L4 structure:
|
| 212 |
+
|
| 213 |
+
\begin{small}
|
| 214 |
+
\begin{verbatim}
|
| 215 |
+
DTI: "Has the following compound-target pair been experimentally
|
| 216 |
+
tested for binding activity?"
|
| 217 |
+
[compound name + target name]
|
| 218 |
+
|
| 219 |
+
CT: "Has the following drug-condition combination ever been tested
|
| 220 |
+
in a registered clinical trial?"
|
| 221 |
+
[drug name + condition name]
|
| 222 |
+
|
| 223 |
+
PPI: "Has the following protein pair ever been experimentally tested
|
| 224 |
+
for physical interaction?"
|
| 225 |
+
[protein A gene symbol + protein B gene symbol]
|
| 226 |
+
|
| 227 |
+
Response format (all domains):
|
| 228 |
+
"On the first line, respond with ONLY 'tested' or 'untested'.
|
| 229 |
+
On the second line, provide brief evidence for your answer."
|
| 230 |
+
\end{verbatim}
|
| 231 |
+
\end{small}
|
| 232 |
+
|
| 233 |
+
\subsection{Few-Shot Configuration}
|
| 234 |
+
|
| 235 |
+
For 3-shot prompting, three independent example sets are sampled (seeds 42, 43, 44), and results are reported as mean $\pm$ std across the three sets. Examples are formatted as:
|
| 236 |
+
|
| 237 |
+
\begin{small}
|
| 238 |
+
\begin{verbatim}
|
| 239 |
+
Here are examples of [task description]:
|
| 240 |
+
|
| 241 |
+
[Example 1 context]
|
| 242 |
+
Answer: [gold answer]
|
| 243 |
+
|
| 244 |
+
---
|
| 245 |
+
|
| 246 |
+
[Example 2 context]
|
| 247 |
+
Answer: [gold answer]
|
| 248 |
+
|
| 249 |
+
---
|
| 250 |
+
|
| 251 |
+
[Example 3 context]
|
| 252 |
+
Answer: [gold answer]
|
| 253 |
+
|
| 254 |
+
Now [task instruction]:
|
| 255 |
+
|
| 256 |
+
[Test instance context]
|
| 257 |
+
\end{verbatim}
|
| 258 |
+
\end{small}
|
paper/appendix/app_schema.tex
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{Database Schema}
|
| 2 |
+
\label{app:schema}
|
| 3 |
+
|
| 4 |
+
NegBioDB uses three separate SQLite databases, one per domain, sharing common design patterns: WAL journal mode, foreign key enforcement, COALESCE-based deduplication indexes, and a four-tier confidence system (gold/silver/bronze/copper). Full DDL for all migrations is available in the repository. Below we summarize the key tables.
|
| 5 |
+
|
| 6 |
+
\subsection{DTI Domain Schema}
|
| 7 |
+
|
| 8 |
+
Two migrations: \texttt{001\_initial\_schema} (core tables) and \texttt{002\_target\_variants} (variant support).
|
| 9 |
+
|
| 10 |
+
\begin{small}
|
| 11 |
+
\begin{verbatim}
|
| 12 |
+
-- Core entity tables
|
| 13 |
+
compounds (compound_id PK, canonical_smiles, inchikey UNIQUE,
|
| 14 |
+
inchikey_connectivity, pubchem_cid, chembl_id,
|
| 15 |
+
molecular_weight, logp, hbd, hba, tpsa, qed, ...)
|
| 16 |
+
|
| 17 |
+
targets (target_id PK, uniprot_accession UNIQUE,
|
| 18 |
+
chembl_target_id, gene_symbol, target_family,
|
| 19 |
+
development_level CHECK IN (Tclin/Tchem/Tbio/Tdark), ...)
|
| 20 |
+
|
| 21 |
+
assays (assay_id PK, source_db, source_assay_id,
|
| 22 |
+
assay_format CHECK IN (biochemical/cell-based/in_vivo),
|
| 23 |
+
screen_type, z_factor, pubmed_id, ...)
|
| 24 |
+
|
| 25 |
+
-- Core fact table (30.5M rows)
|
| 26 |
+
negative_results (result_id PK, compound_id FK, target_id FK, assay_id FK,
|
| 27 |
+
result_type CHECK IN (hard_negative/conditional_negative/
|
| 28 |
+
methodological_negative/dose_time_negative/
|
| 29 |
+
hypothesis_negative),
|
| 30 |
+
confidence_tier CHECK IN (gold/silver/bronze/copper),
|
| 31 |
+
activity_type, activity_value, pchembl_value,
|
| 32 |
+
source_db, source_record_id, extraction_method, ...)
|
| 33 |
+
|
| 34 |
+
-- Dedup: UNIQUE(compound_id, target_id, COALESCE(assay_id,-1),
|
| 35 |
+
-- source_db, source_record_id)
|
| 36 |
+
|
| 37 |
+
-- Aggregation (for ML export)
|
| 38 |
+
compound_target_pairs (pair_id PK, compound_id FK, target_id FK,
|
| 39 |
+
num_assays, num_sources, best_confidence,
|
| 40 |
+
compound_degree, target_degree, ...)
|
| 41 |
+
|
| 42 |
+
-- Variant support (migration 002)
|
| 43 |
+
target_variants (variant_id PK, target_id FK, variant_label,
|
| 44 |
+
source_db, UNIQUE(target_id, variant_label, ...))
|
| 45 |
+
\end{verbatim}
|
| 46 |
+
\end{small}
|
| 47 |
+
|
| 48 |
+
\subsection{CT Domain Schema}
|
| 49 |
+
|
| 50 |
+
Two migrations: \texttt{001\_ct\_initial\_schema} (core tables) and \texttt{002\_schema\_fixes} (expert review fixes).
|
| 51 |
+
|
| 52 |
+
\begin{small}
|
| 53 |
+
\begin{verbatim}
|
| 54 |
+
-- Entity tables
|
| 55 |
+
interventions (intervention_id PK, intervention_type CHECK IN
|
| 56 |
+
(drug/biologic/device/...),
|
| 57 |
+
intervention_name, chembl_id, canonical_smiles,
|
| 58 |
+
inchikey, molecular_type, ...)
|
| 59 |
+
|
| 60 |
+
conditions (condition_id PK, condition_name, mesh_id,
|
| 61 |
+
icd10_code, therapeutic_area, ...)
|
| 62 |
+
|
| 63 |
+
clinical_trials (trial_id PK, source_trial_id UNIQUE,
|
| 64 |
+
overall_status, trial_phase, enrollment_actual,
|
| 65 |
+
primary_endpoint, why_stopped,
|
| 66 |
+
termination_type CHECK IN (clinical_failure/
|
| 67 |
+
administrative/external_event/unknown), ...)
|
| 68 |
+
|
| 69 |
+
-- Core fact table (132,925 rows)
|
| 70 |
+
trial_failure_results (result_id PK, intervention_id FK,
|
| 71 |
+
condition_id FK, trial_id FK,
|
| 72 |
+
failure_category CHECK IN (efficacy/safety/pharmacokinetic/
|
| 73 |
+
enrollment/strategic/regulatory/design/other),
|
| 74 |
+
confidence_tier CHECK IN (gold/silver/bronze/copper),
|
| 75 |
+
p_value_primary, effect_size, serious_adverse_events,
|
| 76 |
+
highest_phase_reached, result_interpretation CHECK IN
|
| 77 |
+
(definitive_negative/inconclusive_underpowered/
|
| 78 |
+
mixed_endpoints/futility_stopped/safety_stopped/
|
| 79 |
+
administrative),
|
| 80 |
+
source_db, extraction_method, ...)
|
| 81 |
+
|
| 82 |
+
-- Dedup: UNIQUE(intervention_id, condition_id,
|
| 83 |
+
-- COALESCE(trial_id,-1), source_db, source_record_id)
|
| 84 |
+
|
| 85 |
+
-- Junction tables
|
| 86 |
+
trial_interventions (trial_id FK, intervention_id FK, arm_role)
|
| 87 |
+
trial_conditions (trial_id FK, condition_id FK)
|
| 88 |
+
intervention_targets (intervention_id FK, uniprot_accession, ...)
|
| 89 |
+
\end{verbatim}
|
| 90 |
+
\end{small}
|
| 91 |
+
|
| 92 |
+
\subsection{PPI Domain Schema}
|
| 93 |
+
|
| 94 |
+
Two migrations: \texttt{001\_ppi\_initial\_schema} (core tables) and \texttt{002\_llm\_annotations} (protein annotations for LLM benchmark).
|
| 95 |
+
|
| 96 |
+
\begin{small}
|
| 97 |
+
\begin{verbatim}
|
| 98 |
+
-- Entity table
|
| 99 |
+
proteins (protein_id PK, uniprot_accession UNIQUE,
|
| 100 |
+
gene_symbol, amino_acid_sequence, sequence_length,
|
| 101 |
+
subcellular_location,
|
| 102 |
+
function_description, go_terms,
|
| 103 |
+
domain_annotations, ...) -- migration 002
|
| 104 |
+
|
| 105 |
+
-- Core fact table (2.23M rows)
|
| 106 |
+
ppi_negative_results (result_id PK, protein1_id FK, protein2_id FK,
|
| 107 |
+
experiment_id FK,
|
| 108 |
+
evidence_type CHECK IN (experimental_non_interaction/
|
| 109 |
+
ml_predicted_negative/low_score_negative/
|
| 110 |
+
compartment_separated/literature_reported),
|
| 111 |
+
confidence_tier CHECK IN (gold/silver/bronze/copper),
|
| 112 |
+
interaction_score, detection_method,
|
| 113 |
+
source_db, extraction_method, ...,
|
| 114 |
+
CHECK (protein1_id < protein2_id)) -- canonical ordering
|
| 115 |
+
|
| 116 |
+
-- Dedup: UNIQUE(protein1_id, protein2_id,
|
| 117 |
+
-- COALESCE(experiment_id,-1),
|
| 118 |
+
-- source_db, source_record_id)
|
| 119 |
+
|
| 120 |
+
-- Aggregation
|
| 121 |
+
protein_protein_pairs (pair_id PK, protein1_id FK, protein2_id FK,
|
| 122 |
+
num_experiments, num_sources, best_confidence,
|
| 123 |
+
protein1_degree, protein2_degree, ...,
|
| 124 |
+
CHECK (protein1_id < protein2_id))
|
| 125 |
+
|
| 126 |
+
-- LLM support (migration 002)
|
| 127 |
+
ppi_publication_abstracts (pmid PK, title, abstract, ...)
|
| 128 |
+
\end{verbatim}
|
| 129 |
+
\end{small}
|
| 130 |
+
|
| 131 |
+
\subsection{Common Design Patterns}
|
| 132 |
+
|
| 133 |
+
\begin{itemize}[nosep,leftmargin=*]
|
| 134 |
+
\item \textbf{Deduplication:} All fact tables use \texttt{COALESCE(fk, -1)} in UNIQUE indexes to handle NULL foreign keys (SQLite treats NULLs as distinct in UNIQUE constraints).
|
| 135 |
+
\item \textbf{Confidence tiers:} Four-level system across all domains: gold (systematic screens, multiple confirmations) $>$ silver (ML-derived, p-value based) $>$ bronze (computational, NLP-detected) $>$ copper (label-only).
|
| 136 |
+
\item \textbf{Aggregation tables:} Pre-computed pair-level statistics for ML export, avoiding expensive JOINs during dataset construction.
|
| 137 |
+
\item \textbf{Symmetric pairs (PPI):} \texttt{CHECK (protein1\_id $<$ protein2\_id)} enforces canonical ordering, preventing duplicate pair representations.
|
| 138 |
+
\item \textbf{Schema migrations:} All databases track applied migrations in a \texttt{schema\_migrations} table for reproducible upgrades.
|
| 139 |
+
\end{itemize}
|
paper/appendix/app_splits.tex
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{Splitting Strategy Details}
|
| 2 |
+
\label{app:splits}
|
| 3 |
+
|
| 4 |
+
This appendix describes the splitting strategies used across the three domains and their implementation details.
|
| 5 |
+
|
| 6 |
+
\subsection{Split Strategy Overview}
|
| 7 |
+
|
| 8 |
+
\begin{table}[h]
|
| 9 |
+
\centering
|
| 10 |
+
\caption{Split strategies by domain. \checkmark = implemented, --- = not applicable.}
|
| 11 |
+
\label{tab:split_overview}
|
| 12 |
+
\scriptsize
|
| 13 |
+
\begin{tabular}{@{}lcccp{5.5cm}@{}}
|
| 14 |
+
\toprule
|
| 15 |
+
\textbf{Strategy} & \textbf{DTI} & \textbf{CT} & \textbf{PPI} & \textbf{Description} \\
|
| 16 |
+
\midrule
|
| 17 |
+
Random & \checkmark & \checkmark & \checkmark & Stratified random assignment (70/10/20) \\
|
| 18 |
+
Cold\_compound/drug & \checkmark & \checkmark & --- & All pairs with held-out compounds in test \\
|
| 19 |
+
Cold\_target/condition & \checkmark & \checkmark & --- & All pairs with held-out targets in test \\
|
| 20 |
+
Cold\_protein & --- & --- & \checkmark & All pairs with held-out proteins in test \\
|
| 21 |
+
Cold\_both & --- & --- & \checkmark & METIS graph partitioning; unseen proteins on both sides \\
|
| 22 |
+
Temporal & --- & \checkmark & --- & $\leq$2017 train, 2018--2019 val, $\geq$2020 test \\
|
| 23 |
+
Scaffold & --- & \checkmark & --- & Murcko scaffold-based grouping \\
|
| 24 |
+
DDB & \checkmark & --- & \checkmark & Degree-balanced binning \\
|
| 25 |
+
\bottomrule
|
| 26 |
+
\end{tabular}
|
| 27 |
+
\end{table}
|
| 28 |
+
|
| 29 |
+
\subsection{Cold Splitting}
|
| 30 |
+
|
| 31 |
+
\textbf{Cold compound/drug/protein.} Entities are randomly partitioned into train/val/test groups. All pairs containing a held-out entity are assigned to the corresponding fold. This tests generalization to unseen chemical or biological entities.
|
| 32 |
+
|
| 33 |
+
\textbf{Cold\_both (PPI only).} We use METIS graph partitioning~\citep{karypis1998metis} to partition proteins into three groups such that proteins in the test set have no interactions with proteins in the training set. This creates a maximally challenging generalization test where \emph{both} proteins in a test pair are unseen during training. Implementation uses the \texttt{pymetis} library with $k=3$ partitions, targeting 70/10/20 splits. The resulting test partition contains only 1.7\% positive examples (242/14,037) due to the extreme network separation, creating a highly imbalanced evaluation setting.
|
| 34 |
+
|
| 35 |
+
\subsection{Temporal Splitting (CT only)}
|
| 36 |
+
|
| 37 |
+
Clinical trials are split by primary completion date: trials completing $\leq$2017 form the training set (42,676 pairs), 2018--2019 form validation (9,257 pairs), and $\geq$2020 form the test set (50,917 pairs). This mimics a realistic prospective prediction scenario. A known limitation: the temporal split can produce single-class validation sets (all negative) for CT-M1, since successful trials are rare in certain time windows. When this occurs, AUROC and other metrics that require both classes to be present are undefined.
|
| 38 |
+
|
| 39 |
+
\subsection{Scaffold Splitting (CT only)}
|
| 40 |
+
|
| 41 |
+
For interventions with resolved SMILES structures (41,240 of 102,850 CT pairs), we compute Murcko scaffolds~\citep{bemis1996murcko} using RDKit. Pairs are grouped by scaffold, then scaffolds are assigned to train/val/test folds. The remaining 61,610 pairs without SMILES are assigned NULL scaffolds and randomly distributed. This tests whether models generalize to structurally novel drug classes.
|
| 42 |
+
|
| 43 |
+
\subsection{Degree-Balanced Splitting (DTI, PPI)}
|
| 44 |
+
|
| 45 |
+
Following~\citet{zheng2020ddb}, entities are binned by their interaction degree (number of partners), and each bin is independently split into train/val/test. This ensures that high-degree and low-degree entities are proportionally represented in each fold, preventing evaluation bias toward well-studied entities. In our experiments, DDB performance was similar to random splitting across all domains (Table~\ref{tab:ml_results}), suggesting degree imbalance is not a major confound in NegBioDB.
|
| 46 |
+
|
| 47 |
+
\subsection{Control Negative Generation}
|
| 48 |
+
|
| 49 |
+
For Experiment~1 (negative source inflation), we generate two types of control negatives:
|
| 50 |
+
|
| 51 |
+
\begin{itemize}[nosep,leftmargin=*]
|
| 52 |
+
\item \textbf{Uniform random:} Randomly sampled entity pairs not present in the positive set or NegBioDB negatives. Equal in size to the NegBioDB negative set.
|
| 53 |
+
\item \textbf{Degree-matched:} Random pairs where each entity's degree matches the degree distribution of the NegBioDB negative set. This controls for the hypothesis that degree alone explains performance differences.
|
| 54 |
+
\end{itemize}
|
| 55 |
+
|
| 56 |
+
Both control sets are generated per-seed for CT and PPI (3 seeds) and once for DTI (seed 42). Conflicts between control negatives and positive pairs are removed before training.
|
paper/appendix/appendix_main.tex
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\appendix
|
| 2 |
+
|
| 3 |
+
\input{appendix/app_datasheet}
|
| 4 |
+
\input{appendix/app_ml_tables}
|
| 5 |
+
\input{appendix/app_llm_tables}
|
| 6 |
+
\input{appendix/app_l3_analysis}
|
| 7 |
+
\input{appendix/app_contamination}
|
| 8 |
+
\input{appendix/app_prompts}
|
| 9 |
+
\input{appendix/app_schema}
|
| 10 |
+
\input{appendix/app_splits}
|
| 11 |
+
\input{appendix/app_croissant}
|
| 12 |
+
\input{appendix/app_checklist}
|
paper/checklist.tex
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section*{NeurIPS Paper Checklist}
|
| 2 |
+
|
| 3 |
+
\begin{enumerate}
|
| 4 |
+
|
| 5 |
+
\item {\bf Claims}
|
| 6 |
+
\item[] Question: Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope?
|
| 7 |
+
\item[] Answer: \answerYes{}
|
| 8 |
+
\item[] Justification: The abstract and introduction (Section~\ref{sec:introduction}) state four specific contributions---NegBioDB database, NegBioBench benchmark, negative source inflation analysis, and the opacity gradient---all supported by experimental results in Section~\ref{sec:experiments} with exact numerical values.
|
| 9 |
+
|
| 10 |
+
\item {\bf Limitations}
|
| 11 |
+
\item[] Question: Does the paper discuss the limitations of the work performed by the authors?
|
| 12 |
+
\item[] Answer: \answerYes{}
|
| 13 |
+
\item[] Justification: Section~\ref{sec:discussion} includes a dedicated limitations paragraph addressing: solo authorship, DTI single seed, CT drug resolution coverage (20.6\%), PPI L1/L2 trivial solvability, L3 judge ceiling effect, and contamination analysis scope.
|
| 14 |
+
|
| 15 |
+
\item {\bf Theory assumptions and proofs}
|
| 16 |
+
\item[] Question: For each theoretical result, does the paper provide the full set of assumptions and a complete (and correct) proof?
|
| 17 |
+
\item[] Answer: \answerNA{}
|
| 18 |
+
\item[] Justification: This paper presents empirical results and a benchmark; it does not include theoretical results or proofs.
|
| 19 |
+
|
| 20 |
+
\item {\bf Experimental result reproducibility}
|
| 21 |
+
\item[] Question: Does the paper fully disclose all the information needed to reproduce the main experimental results of the paper to the extent that it affects the main claims and/or conclusions of the paper (regardless of whether the code and data are provided or not)?
|
| 22 |
+
\item[] Answer: \answerYes{}
|
| 23 |
+
\item[] Justification: Section~\ref{sec:benchmark} describes all models, splits, and configurations. Appendix~\ref{app:splits} details splitting strategies, Appendix~\ref{app:prompts} provides full LLM prompt templates, and Appendix~\ref{app:schema} provides database schema. All hyperparameters and random seeds are specified. Complete per-run results appear in Appendices~\ref{app:ml_tables}--\ref{app:llm_tables}.
|
| 24 |
+
|
| 25 |
+
\item {\bf Open access to data and code}
|
| 26 |
+
\item[] Question: Does the paper provide open access to the data and code, with sufficient instructions to faithfully reproduce the main experimental results, as described in supplemental material?
|
| 27 |
+
\item[] Answer: \answerYes{}
|
| 28 |
+
\item[] Justification: The database (SQLite files), ML exports (Parquet), and all source code are released via GitHub and HuggingFace under CC BY-SA 4.0 (data) and MIT (code) licenses. Croissant metadata is provided for machine-readable dataset discovery. SLURM scripts for HPC execution are included.
|
| 29 |
+
|
| 30 |
+
\item {\bf Experimental setting/details}
|
| 31 |
+
\item[] Question: Does the paper specify all the training and test details (e.g., data splits, hyperparameters, how they were chosen, type of optimizer) necessary to understand the results?
|
| 32 |
+
\item[] Answer: \answerYes{}
|
| 33 |
+
\item[] Justification: Section~\ref{sec:benchmark} specifies all models, split strategies, and evaluation metrics. Appendix~\ref{app:splits} provides complete split details including ratios, temporal cutoffs, and METIS parameters. LLM configurations (zero-shot and 3-shot with seeds 42/43/44) are specified.
|
| 34 |
+
|
| 35 |
+
\item {\bf Experiment statistical significance}
|
| 36 |
+
\item[] Question: Does the paper report error bars suitably and correctly defined or other appropriate information about the statistical significance of the experiments?
|
| 37 |
+
\item[] Answer: \answerYes{}
|
| 38 |
+
\item[] Justification: CT and PPI experiments use 3 random seeds (42, 43, 44); results are reported as mean $\pm$ standard deviation. DTI uses a single seed, which is acknowledged as a limitation in Section~\ref{sec:discussion}. LLM 3-shot results report mean $\pm$ std across 3 independent example sets.
|
| 39 |
+
|
| 40 |
+
\item {\bf Experiments compute resources}
|
| 41 |
+
\item[] Question: For each experiment, does the paper provide sufficient information on the computer resources (type of compute workers, memory, time of execution) needed to reproduce the experiments?
|
| 42 |
+
\item[] Answer: \answerYes{}
|
| 43 |
+
\item[] Justification: ML experiments were run on Cornell Cayuga HPC cluster (NVIDIA A100 GPUs, 40GB). Local LLMs (Llama, Qwen, Mistral) used vLLM on A100s. API-based LLMs used commercial endpoints (OpenAI, Google, Anthropic). Database construction ran on Apple M1 Mac (64GB RAM). Total compute: approximately 500 GPU-hours for ML training, 200 GPU-hours for local LLM inference.
|
| 44 |
+
|
| 45 |
+
\item {\bf Code of ethics}
|
| 46 |
+
\item[] Question: Does the research conducted in the paper conform, in every respect, with the NeurIPS Code of Ethics \url{https://neurips.cc/public/EthicsGuidelines}?
|
| 47 |
+
\item[] Answer: \answerYes{}
|
| 48 |
+
\item[] Justification: All data is derived from public databases with appropriate licenses. No human subjects or private data are involved. The work aims to improve biomedical AI evaluation, with potential benefits for drug discovery and clinical trial design.
|
| 49 |
+
|
| 50 |
+
\item {\bf Broader impacts}
|
| 51 |
+
\item[] Question: Does the paper discuss both potential positive societal impacts and negative societal impacts of the work performed?
|
| 52 |
+
\item[] Answer: \answerYes{}
|
| 53 |
+
\item[] Justification: Section~\ref{sec:discussion} discusses positive impacts (improving drug discovery, preventing wasted research effort) and negative impacts (100\% evidence hallucination rate as a safety concern for LLM deployment in clinical settings). The opacity gradient finding directly warns against deploying LLMs for negative result assessment without contamination controls.
|
| 54 |
+
|
| 55 |
+
\item {\bf Safeguards}
|
| 56 |
+
\item[] Question: Does the paper describe safeguards that have been put in place for responsible release of data or models that have a high risk for misuse (e.g., pre-trained language models, image generators, or scraped datasets)?
|
| 57 |
+
\item[] Answer: \answerNA{}
|
| 58 |
+
\item[] Justification: NegBioDB contains only publicly available biomedical data (compound structures, protein sequences, clinical trial metadata) aggregated from 12 public databases. No pre-trained models are released. The data poses no risk for misuse beyond its source databases.
|
| 59 |
+
|
| 60 |
+
\item {\bf Licenses for existing assets}
|
| 61 |
+
\item[] Question: Are the creators or original owners of assets (e.g., code, data, models), used in the paper, properly credited and are the license and terms of use explicitly mentioned and properly respected?
|
| 62 |
+
\item[] Answer: \answerYes{}
|
| 63 |
+
\item[] Justification: All 12 source databases are cited with their licenses: ChEMBL (CC BY-SA 3.0), PubChem (public domain), BindingDB (CC BY 3.0), DAVIS (CC BY 4.0), AACT (public domain), CTO (MIT), Open Targets (Apache 2.0), Shi \& Du 2024 (CC BY 4.0), IntAct (CC BY 4.0), HuRI (CC BY 4.0), hu.MAP (public), STRING (CC BY 4.0). NegBioDB adopts CC BY-SA 4.0 to comply with ChEMBL's viral share-alike clause.
|
| 64 |
+
|
| 65 |
+
\item {\bf New assets}
|
| 66 |
+
\item[] Question: Are new assets introduced in the paper well documented and is the documentation provided alongside the assets?
|
| 67 |
+
\item[] Answer: \answerYes{}
|
| 68 |
+
\item[] Justification: NegBioDB is documented via: Gebru et al.\ Datasheet (Appendix~\ref{app:datasheet}), database schema (Appendix~\ref{app:schema}), Croissant JSON-LD metadata, HuggingFace dataset card, and comprehensive README. The benchmark code includes 800+ automated tests.
|
| 69 |
+
|
| 70 |
+
\item {\bf Crowdsourcing and research with human subjects}
|
| 71 |
+
\item[] Question: For crowdsourcing experiments and research with human subjects, does the paper include the full text of instructions given to participants and screenshots, if applicable, as well as details about compensation (if any)?
|
| 72 |
+
\item[] Answer: \answerNA{}
|
| 73 |
+
\item[] Justification: This research does not involve crowdsourcing or human subjects. All data comes from public databases and all experiments are computational.
|
| 74 |
+
|
| 75 |
+
\item {\bf Institutional review board (IRB) approvals or equivalent for research with human subjects}
|
| 76 |
+
\item[] Question: Does the paper describe potential risks incurred by study participants, whether such risks were disclosed to the subjects, and whether Institutional Review Board (IRB) approvals (or an equivalent approval/review based on the requirements of your country or institution) were obtained?
|
| 77 |
+
\item[] Answer: \answerNA{}
|
| 78 |
+
\item[] Justification: No human subjects research is involved. Clinical trial data is sourced from the public AACT database (de-identified).
|
| 79 |
+
|
| 80 |
+
\item {\bf Declaration of LLM usage}
|
| 81 |
+
\item[] Question: Does the paper describe the usage of LLMs if it is an important, original, or non-standard component of the core methods in this research?
|
| 82 |
+
\item[] Answer: \answerYes{}
|
| 83 |
+
\item[] Justification: LLMs are a core component of the benchmark evaluation (Section~\ref{sec:benchmark}, LLM Track). Five LLMs are evaluated as subjects across 4 levels and 3 domains (241 total runs). Domain-specific LLM judges are used for L3 reasoning evaluation: GPT-4o-mini (CT), Gemini-2.5-Flash (PPI), and Gemini-2.5-Flash-Lite (DTI). All model identifiers, versions, and configurations are specified. Claude Code was used for code development and paper writing assistance.
|
| 84 |
+
|
| 85 |
+
\end{enumerate}
|
paper/main.tex
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\documentclass{article}
|
| 2 |
+
|
| 3 |
+
% NeurIPS 2026 Evaluations & Datasets Track (single-blind)
|
| 4 |
+
\usepackage[eandd, nonanonymous]{neurips_2026}
|
| 5 |
+
|
| 6 |
+
\usepackage[utf8]{inputenc}
|
| 7 |
+
\usepackage[T1]{fontenc}
|
| 8 |
+
\usepackage{hyperref}
|
| 9 |
+
\usepackage{url}
|
| 10 |
+
\usepackage{booktabs}
|
| 11 |
+
\usepackage{amsfonts}
|
| 12 |
+
\usepackage{amsmath}
|
| 13 |
+
\usepackage{nicefrac}
|
| 14 |
+
\usepackage{microtype}
|
| 15 |
+
\usepackage{xcolor}
|
| 16 |
+
\usepackage{graphicx}
|
| 17 |
+
\usepackage{subcaption}
|
| 18 |
+
\usepackage{multirow}
|
| 19 |
+
\usepackage{enumitem}
|
| 20 |
+
|
| 21 |
+
\title{NegBioDB: A Multi-Domain Database and Benchmark for Experimentally Confirmed Negative Results in Biomedicine}
|
| 22 |
+
|
| 23 |
+
\author{%
|
| 24 |
+
Jae-Yoon Jung \\
|
| 25 |
+
Department of Physiology and Biophysics \\
|
| 26 |
+
Weill Cornell Medicine \\
|
| 27 |
+
New York, NY 10065 \\
|
| 28 |
+
\texttt{jaj2043@med.cornell.edu}
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
\begin{document}
|
| 32 |
+
|
| 33 |
+
\maketitle
|
| 34 |
+
|
| 35 |
+
\input{sections/abstract}
|
| 36 |
+
\input{sections/introduction}
|
| 37 |
+
\input{sections/database}
|
| 38 |
+
\input{sections/benchmark}
|
| 39 |
+
\input{sections/experiments}
|
| 40 |
+
\input{sections/discussion}
|
| 41 |
+
|
| 42 |
+
\bibliographystyle{plainnat}
|
| 43 |
+
\bibliography{references}
|
| 44 |
+
|
| 45 |
+
\newpage
|
| 46 |
+
\input{appendix/appendix_main}
|
| 47 |
+
|
| 48 |
+
\end{document}
|
paper/neurips_2026.sty
ADDED
|
@@ -0,0 +1,437 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
% partial rewrite of the LaTeX2e package for submissions to the
|
| 2 |
+
% Conference on Neural Information Processing Systems (NeurIPS):
|
| 3 |
+
%
|
| 4 |
+
% - uses more LaTeX conventions
|
| 5 |
+
% - line numbers at submission time replaced with aligned numbers from
|
| 6 |
+
% lineno package
|
| 7 |
+
% - \nipsfinalcopy replaced with [final] package option
|
| 8 |
+
% - automatically loads times package for authors
|
| 9 |
+
% - loads natbib automatically; this can be suppressed with the
|
| 10 |
+
% [nonatbib] package option
|
| 11 |
+
% - adds foot line to first page identifying the conference
|
| 12 |
+
% - adds preprint option for submission to e.g. arXiv
|
| 13 |
+
% - conference acronym modified
|
| 14 |
+
% - update foot line to display the track name
|
| 15 |
+
%
|
| 16 |
+
% Roman Garnett (garnett@wustl.edu) and the many authors of
|
| 17 |
+
% nips15submit_e.sty, including MK and drstrip@sandia
|
| 18 |
+
%
|
| 19 |
+
% last revision: January 2026
|
| 20 |
+
|
| 21 |
+
\NeedsTeXFormat{LaTeX2e}
|
| 22 |
+
\ProvidesPackage{neurips_2026}[2026-01-29 NeurIPS 2026 submission/camera-ready style file]
|
| 23 |
+
|
| 24 |
+
% declare final option, which creates camera-ready copy
|
| 25 |
+
\newif\if@neuripsfinal\@neuripsfinalfalse
|
| 26 |
+
\DeclareOption{final}{
|
| 27 |
+
\@neuripsfinaltrue
|
| 28 |
+
\@anonymousfalse
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
% declare nonatbib option, which does not load natbib in case of
|
| 32 |
+
% package clash (users can pass options to natbib via
|
| 33 |
+
% \PassOptionsToPackage)
|
| 34 |
+
\newif\if@natbib\@natbibtrue
|
| 35 |
+
\DeclareOption{nonatbib}{
|
| 36 |
+
\@natbibfalse
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
% declare preprint option, which creates a preprint version ready for
|
| 40 |
+
% upload to, e.g., arXiv
|
| 41 |
+
\newif\if@preprint\@preprintfalse
|
| 42 |
+
\DeclareOption{preprint}{
|
| 43 |
+
\@preprinttrue
|
| 44 |
+
\@anonymousfalse
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
% determine the track of the paper in camera-ready mode
|
| 48 |
+
\newif\if@main\@maintrue
|
| 49 |
+
\DeclareOption{main}{
|
| 50 |
+
\@maintrue
|
| 51 |
+
\newcommand{\@trackname}{\@neuripsordinal\ Conference on Neural Information Processing Systems (NeurIPS \@neuripsyear).}
|
| 52 |
+
}
|
| 53 |
+
\newif\if@position\@positionfalse
|
| 54 |
+
\DeclareOption{position}{
|
| 55 |
+
\@positiontrue
|
| 56 |
+
\newcommand{\@trackname}{\@neuripsordinal\ Conference on Neural Information Processing Systems (NeurIPS \@neuripsyear). Position Paper Track.}
|
| 57 |
+
}
|
| 58 |
+
\newif\if@eandd\@eanddfalse
|
| 59 |
+
\DeclareOption{eandd}{
|
| 60 |
+
\@eanddtrue
|
| 61 |
+
\if@neuripsfinal\@anonymousfalse\else\if@preprint\@anonymousfalse\else\@anonymoustrue\fi\fi
|
| 62 |
+
\newcommand{\@trackname}{\@neuripsordinal\ Conference on Neural Information Processing Systems (NeurIPS \@neuripsyear). Track on Evaluations and Datasets.}
|
| 63 |
+
}
|
| 64 |
+
\newif\if@creativeai\@creativeaifalse
|
| 65 |
+
\DeclareOption{creativeai}{
|
| 66 |
+
\@creativeaitrue
|
| 67 |
+
\@anonymousfalse
|
| 68 |
+
\newcommand{\@trackname}{\@neuripsordinal\ Conference on Neural Information Processing Systems (NeurIPS \@neuripsyear). Creative AI Track.}
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
% For anonymous or non-anonymous
|
| 72 |
+
\newif\if@anonymous\@anonymoustrue
|
| 73 |
+
|
| 74 |
+
% For workshop papers
|
| 75 |
+
\newcommand{\@workshoptitle}{}
|
| 76 |
+
\newcommand{\workshoptitle}[1]{\renewcommand{\@workshoptitle}{#1}}
|
| 77 |
+
|
| 78 |
+
\newif\if@workshop\@workshopfalse
|
| 79 |
+
\DeclareOption{sglblindworkshop}{
|
| 80 |
+
\@workshoptrue
|
| 81 |
+
\@anonymousfalse
|
| 82 |
+
\newcommand{\@trackname}{\@neuripsordinal\ Conference on Neural Information Processing Systems (NeurIPS \@neuripsyear). Workshop: \@workshoptitle.}
|
| 83 |
+
}
|
| 84 |
+
\DeclareOption{dblblindworkshop}{
|
| 85 |
+
\@workshoptrue
|
| 86 |
+
\newcommand{\@trackname}{\@neuripsordinal\ Conference on Neural Information Processing Systems (NeurIPS \@neuripsyear). Workshop: \@workshoptitle.}
|
| 87 |
+
}
|
| 88 |
+
\DeclareOption{nonanonymous}{
|
| 89 |
+
\@anonymousfalse
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
\ProcessOptions\relax
|
| 93 |
+
|
| 94 |
+
% fonts
|
| 95 |
+
\renewcommand{\rmdefault}{ptm}
|
| 96 |
+
\renewcommand{\sfdefault}{phv}
|
| 97 |
+
|
| 98 |
+
% change this every year for notice string at bottom
|
| 99 |
+
\newcommand{\@neuripsordinal}{40th}
|
| 100 |
+
\newcommand{\@neuripsyear}{2026}
|
| 101 |
+
\newcommand{\@neuripslocation}{Sydney}
|
| 102 |
+
|
| 103 |
+
% acknowledgments
|
| 104 |
+
\usepackage{environ}
|
| 105 |
+
\newcommand{\acksection}{\section*{Acknowledgments and Disclosure of Funding}}
|
| 106 |
+
\NewEnviron{ack}{%
|
| 107 |
+
\acksection
|
| 108 |
+
\BODY
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
% load natbib unless told otherwise
|
| 113 |
+
\if@natbib
|
| 114 |
+
\RequirePackage{natbib}
|
| 115 |
+
\fi
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
% set page geometry
|
| 122 |
+
\usepackage[verbose=true,letterpaper]{geometry}
|
| 123 |
+
\AtBeginDocument{
|
| 124 |
+
\newgeometry{
|
| 125 |
+
textheight=9in,
|
| 126 |
+
textwidth=5.5in,
|
| 127 |
+
top=1in,
|
| 128 |
+
headheight=12pt,
|
| 129 |
+
headsep=25pt,
|
| 130 |
+
footskip=30pt
|
| 131 |
+
}
|
| 132 |
+
\@ifpackageloaded{fullpage}
|
| 133 |
+
{\PackageWarning{neurips_2026}{fullpage package not allowed! Overwriting formatting.}}
|
| 134 |
+
{}
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
\widowpenalty=10000
|
| 138 |
+
\clubpenalty=10000
|
| 139 |
+
\flushbottom
|
| 140 |
+
\sloppy
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
% font sizes with reduced leading
|
| 144 |
+
\renewcommand{\normalsize}{%
|
| 145 |
+
\@setfontsize\normalsize\@xpt\@xipt
|
| 146 |
+
\abovedisplayskip 7\p@ \@plus 2\p@ \@minus 5\p@
|
| 147 |
+
\abovedisplayshortskip \z@ \@plus 3\p@
|
| 148 |
+
\belowdisplayskip \abovedisplayskip
|
| 149 |
+
\belowdisplayshortskip 4\p@ \@plus 3\p@ \@minus 3\p@
|
| 150 |
+
}
|
| 151 |
+
\normalsize
|
| 152 |
+
\renewcommand{\small}{%
|
| 153 |
+
\@setfontsize\small\@ixpt\@xpt
|
| 154 |
+
\abovedisplayskip 6\p@ \@plus 1.5\p@ \@minus 4\p@
|
| 155 |
+
\abovedisplayshortskip \z@ \@plus 2\p@
|
| 156 |
+
\belowdisplayskip \abovedisplayskip
|
| 157 |
+
\belowdisplayshortskip 3\p@ \@plus 2\p@ \@minus 2\p@
|
| 158 |
+
}
|
| 159 |
+
\renewcommand{\footnotesize}{\@setfontsize\footnotesize\@ixpt\@xpt}
|
| 160 |
+
\renewcommand{\scriptsize}{\@setfontsize\scriptsize\@viipt\@viiipt}
|
| 161 |
+
\renewcommand{\tiny}{\@setfontsize\tiny\@vipt\@viipt}
|
| 162 |
+
\renewcommand{\large}{\@setfontsize\large\@xiipt{14}}
|
| 163 |
+
\renewcommand{\Large}{\@setfontsize\Large\@xivpt{16}}
|
| 164 |
+
\renewcommand{\LARGE}{\@setfontsize\LARGE\@xviipt{20}}
|
| 165 |
+
\renewcommand{\huge}{\@setfontsize\huge\@xxpt{23}}
|
| 166 |
+
\renewcommand{\Huge}{\@setfontsize\Huge\@xxvpt{28}}
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
% Force \tiny to be no smaller than 6pt
|
| 170 |
+
\renewcommand{\tiny}{\fontsize{6pt}{7pt}\selectfont}
|
| 171 |
+
|
| 172 |
+
% Force \scriptsize to be no smaller than 7pt
|
| 173 |
+
\renewcommand{\scriptsize}{\fontsize{7pt}{8pt}\selectfont}
|
| 174 |
+
|
| 175 |
+
% Force \footnotesize to be no smaller than 8pt
|
| 176 |
+
\renewcommand{\footnotesize}{\fontsize{8pt}{9.5pt}\selectfont}
|
| 177 |
+
|
| 178 |
+
% sections with less space
|
| 179 |
+
\providecommand{\section}{}
|
| 180 |
+
\renewcommand{\section}{%
|
| 181 |
+
\@startsection{section}{1}{\z@}%
|
| 182 |
+
{-2.0ex \@plus -0.5ex \@minus -0.2ex}%
|
| 183 |
+
{ 1.5ex \@plus 0.3ex \@minus 0.2ex}%
|
| 184 |
+
{\large\bf\raggedright}%
|
| 185 |
+
}
|
| 186 |
+
\providecommand{\subsection}{}
|
| 187 |
+
\renewcommand{\subsection}{%
|
| 188 |
+
\@startsection{subsection}{2}{\z@}%
|
| 189 |
+
{-1.8ex \@plus -0.5ex \@minus -0.2ex}%
|
| 190 |
+
{ 0.8ex \@plus 0.2ex}%
|
| 191 |
+
{\normalsize\bf\raggedright}%
|
| 192 |
+
}
|
| 193 |
+
\providecommand{\subsubsection}{}
|
| 194 |
+
\renewcommand{\subsubsection}{%
|
| 195 |
+
\@startsection{subsubsection}{3}{\z@}%
|
| 196 |
+
{-1.5ex \@plus -0.5ex \@minus -0.2ex}%
|
| 197 |
+
{ 0.5ex \@plus 0.2ex}%
|
| 198 |
+
{\normalsize\bf\raggedright}%
|
| 199 |
+
}
|
| 200 |
+
\providecommand{\paragraph}{}
|
| 201 |
+
\renewcommand{\paragraph}{%
|
| 202 |
+
\@startsection{paragraph}{4}{\z@}%
|
| 203 |
+
{1.5ex \@plus 0.5ex \@minus 0.2ex}%
|
| 204 |
+
{-1em}%
|
| 205 |
+
{\normalsize\bf}%
|
| 206 |
+
}
|
| 207 |
+
\providecommand{\subparagraph}{}
|
| 208 |
+
\renewcommand{\subparagraph}{%
|
| 209 |
+
\@startsection{subparagraph}{5}{\z@}%
|
| 210 |
+
{1.5ex \@plus 0.5ex \@minus 0.2ex}%
|
| 211 |
+
{-1em}%
|
| 212 |
+
{\normalsize\bf}%
|
| 213 |
+
}
|
| 214 |
+
\providecommand{\subsubsubsection}{}
|
| 215 |
+
\renewcommand{\subsubsubsection}{%
|
| 216 |
+
\vskip5pt{\noindent\normalsize\rm\raggedright}%
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
% float placement
|
| 220 |
+
\renewcommand{\topfraction }{0.85}
|
| 221 |
+
\renewcommand{\bottomfraction }{0.4}
|
| 222 |
+
\renewcommand{\textfraction }{0.1}
|
| 223 |
+
\renewcommand{\floatpagefraction}{0.7}
|
| 224 |
+
|
| 225 |
+
\newlength{\@neuripsabovecaptionskip}\setlength{\@neuripsabovecaptionskip}{7\p@}
|
| 226 |
+
\newlength{\@neuripsbelowcaptionskip}\setlength{\@neuripsbelowcaptionskip}{\z@}
|
| 227 |
+
|
| 228 |
+
\setlength{\abovecaptionskip}{\@neuripsabovecaptionskip}
|
| 229 |
+
\setlength{\belowcaptionskip}{\@neuripsbelowcaptionskip}
|
| 230 |
+
|
| 231 |
+
% swap above/belowcaptionskip lengths for tables
|
| 232 |
+
\renewenvironment{table}
|
| 233 |
+
{\setlength{\abovecaptionskip}{\@neuripsbelowcaptionskip}%
|
| 234 |
+
\setlength{\belowcaptionskip}{\@neuripsabovecaptionskip}%
|
| 235 |
+
\@float{table}}
|
| 236 |
+
{\end@float}
|
| 237 |
+
|
| 238 |
+
% footnote formatting
|
| 239 |
+
\setlength{\footnotesep }{6.65\p@}
|
| 240 |
+
\setlength{\skip\footins}{9\p@ \@plus 4\p@ \@minus 2\p@}
|
| 241 |
+
\renewcommand{\footnoterule}{\kern-3\p@ \hrule width 12pc \kern 2.6\p@}
|
| 242 |
+
\setcounter{footnote}{0}
|
| 243 |
+
|
| 244 |
+
% paragraph formatting
|
| 245 |
+
\setlength{\parindent}{\z@}
|
| 246 |
+
\setlength{\parskip }{5.5\p@}
|
| 247 |
+
|
| 248 |
+
% list formatting
|
| 249 |
+
\setlength{\topsep }{4\p@ \@plus 1\p@ \@minus 2\p@}
|
| 250 |
+
\setlength{\partopsep }{1\p@ \@plus 0.5\p@ \@minus 0.5\p@}
|
| 251 |
+
\setlength{\itemsep }{2\p@ \@plus 1\p@ \@minus 0.5\p@}
|
| 252 |
+
\setlength{\parsep }{2\p@ \@plus 1\p@ \@minus 0.5\p@}
|
| 253 |
+
\setlength{\leftmargin }{3pc}
|
| 254 |
+
\setlength{\leftmargini }{\leftmargin}
|
| 255 |
+
\setlength{\leftmarginii }{2em}
|
| 256 |
+
\setlength{\leftmarginiii}{1.5em}
|
| 257 |
+
\setlength{\leftmarginiv }{1.0em}
|
| 258 |
+
\setlength{\leftmarginv }{0.5em}
|
| 259 |
+
\def\@listi {\leftmargin\leftmargini}
|
| 260 |
+
\def\@listii {\leftmargin\leftmarginii
|
| 261 |
+
\labelwidth\leftmarginii
|
| 262 |
+
\advance\labelwidth-\labelsep
|
| 263 |
+
\topsep 2\p@ \@plus 1\p@ \@minus 0.5\p@
|
| 264 |
+
\parsep 1\p@ \@plus 0.5\p@ \@minus 0.5\p@
|
| 265 |
+
\itemsep \parsep}
|
| 266 |
+
\def\@listiii{\leftmargin\leftmarginiii
|
| 267 |
+
\labelwidth\leftmarginiii
|
| 268 |
+
\advance\labelwidth-\labelsep
|
| 269 |
+
\topsep 1\p@ \@plus 0.5\p@ \@minus 0.5\p@
|
| 270 |
+
\parsep \z@
|
| 271 |
+
\partopsep 0.5\p@ \@plus 0\p@ \@minus 0.5\p@
|
| 272 |
+
\itemsep \topsep}
|
| 273 |
+
\def\@listiv {\leftmargin\leftmarginiv
|
| 274 |
+
\labelwidth\leftmarginiv
|
| 275 |
+
\advance\labelwidth-\labelsep}
|
| 276 |
+
\def\@listv {\leftmargin\leftmarginv
|
| 277 |
+
\labelwidth\leftmarginv
|
| 278 |
+
\advance\labelwidth-\labelsep}
|
| 279 |
+
\def\@listvi {\leftmargin\leftmarginvi
|
| 280 |
+
\labelwidth\leftmarginvi
|
| 281 |
+
\advance\labelwidth-\labelsep}
|
| 282 |
+
|
| 283 |
+
% create title
|
| 284 |
+
\providecommand{\maketitle}{}
|
| 285 |
+
\renewcommand{\maketitle}{%
|
| 286 |
+
\par
|
| 287 |
+
\begingroup
|
| 288 |
+
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
|
| 289 |
+
% for perfect author name centering
|
| 290 |
+
\renewcommand{\@makefnmark}{\hbox to \z@{$^{\@thefnmark}$\hss}}
|
| 291 |
+
% The footnote-mark was overlapping the footnote-text,
|
| 292 |
+
% added the following to fix this problem (MK)
|
| 293 |
+
\long\def\@makefntext##1{%
|
| 294 |
+
\parindent 1em\noindent
|
| 295 |
+
\hbox to 1.8em{\hss $\m@th ^{\@thefnmark}$}##1
|
| 296 |
+
}
|
| 297 |
+
\thispagestyle{empty}
|
| 298 |
+
\@maketitle
|
| 299 |
+
\@thanks
|
| 300 |
+
\@notice
|
| 301 |
+
\endgroup
|
| 302 |
+
\let\maketitle\relax
|
| 303 |
+
\let\thanks\relax
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
% rules for title box at top of first page
|
| 307 |
+
\newcommand{\@toptitlebar}{
|
| 308 |
+
\hrule height 4\p@
|
| 309 |
+
\vskip 0.25in
|
| 310 |
+
\vskip -\parskip%
|
| 311 |
+
}
|
| 312 |
+
\newcommand{\@bottomtitlebar}{
|
| 313 |
+
\vskip 0.29in
|
| 314 |
+
\vskip -\parskip
|
| 315 |
+
\hrule height 1\p@
|
| 316 |
+
\vskip 0.09in%
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
% create title (includes both anonymized and non-anonymized versions)
|
| 320 |
+
\providecommand{\@maketitle}{}
|
| 321 |
+
\renewcommand{\@maketitle}{%
|
| 322 |
+
\vbox{%
|
| 323 |
+
\hsize\textwidth
|
| 324 |
+
\linewidth\hsize
|
| 325 |
+
\vskip 0.1in
|
| 326 |
+
\@toptitlebar
|
| 327 |
+
\centering
|
| 328 |
+
{\LARGE\bf \@title\par}
|
| 329 |
+
\@bottomtitlebar
|
| 330 |
+
\if@anonymous
|
| 331 |
+
\begin{tabular}[t]{c}\bf\rule{\z@}{24\p@}
|
| 332 |
+
Anonymous Author(s) \\
|
| 333 |
+
Affiliation \\
|
| 334 |
+
Address \\
|
| 335 |
+
\texttt{email} \\
|
| 336 |
+
\end{tabular}%
|
| 337 |
+
\else
|
| 338 |
+
\def\And{%
|
| 339 |
+
\end{tabular}\hfil\linebreak[0]\hfil%
|
| 340 |
+
\begin{tabular}[t]{c}\bf\rule{\z@}{24\p@}\ignorespaces%
|
| 341 |
+
}
|
| 342 |
+
\def\AND{%
|
| 343 |
+
\end{tabular}\hfil\linebreak[4]\hfil%
|
| 344 |
+
\begin{tabular}[t]{c}\bf\rule{\z@}{24\p@}\ignorespaces%
|
| 345 |
+
}
|
| 346 |
+
\begin{tabular}[t]{c}\bf\rule{\z@}{24\p@}\@author\end{tabular}%
|
| 347 |
+
\fi
|
| 348 |
+
\vskip 0.3in \@minus 0.1in
|
| 349 |
+
}
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
% add conference notice to bottom of first page
|
| 353 |
+
\newcommand{\ftype@noticebox}{8}
|
| 354 |
+
\newcommand{\@notice}{%
|
| 355 |
+
% give a bit of extra room back to authors on first page
|
| 356 |
+
\enlargethispage{2\baselineskip}%
|
| 357 |
+
\@float{noticebox}[b]%
|
| 358 |
+
\footnotesize\@noticestring%
|
| 359 |
+
\end@float%
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
% abstract styling
|
| 363 |
+
\renewenvironment{abstract}%
|
| 364 |
+
{%
|
| 365 |
+
\vskip 0.075in%
|
| 366 |
+
\centerline%
|
| 367 |
+
{\large\bf Abstract}%
|
| 368 |
+
\vspace{0.5ex}%
|
| 369 |
+
\begin{quote}%
|
| 370 |
+
}
|
| 371 |
+
{
|
| 372 |
+
\par%
|
| 373 |
+
\end{quote}%
|
| 374 |
+
\vskip 1ex%
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
% For the paper checklist
|
| 378 |
+
\newcommand{\answerYes}[1][]{\textcolor{blue}{[Yes]#1}}
|
| 379 |
+
\newcommand{\answerNo}[1][]{\textcolor{orange}{[No]#1}}
|
| 380 |
+
\newcommand{\answerNA}[1][]{\textcolor{gray}{[N/A]#1}}
|
| 381 |
+
\newcommand{\answerTODO}[1][]{\textcolor{red}{\bf [TODO]}}
|
| 382 |
+
\newcommand{\justificationTODO}[1][]{\textcolor{red}{\bf [TODO]}}
|
| 383 |
+
|
| 384 |
+
% handle tweaks for camera-ready copy vs. submission copy
|
| 385 |
+
\if@preprint
|
| 386 |
+
\newcommand{\@noticestring}{%
|
| 387 |
+
Preprint.%
|
| 388 |
+
}
|
| 389 |
+
\else
|
| 390 |
+
\if@neuripsfinal
|
| 391 |
+
\newcommand{\@noticestring}{
|
| 392 |
+
\@trackname
|
| 393 |
+
}
|
| 394 |
+
\else
|
| 395 |
+
\newcommand{\@noticestring}{%
|
| 396 |
+
Submitted to \@neuripsordinal\/ Conference on Neural Information Processing Systems (NeurIPS \@neuripsyear). Do not distribute.%
|
| 397 |
+
}
|
| 398 |
+
|
| 399 |
+
% hide the acknowledgements
|
| 400 |
+
\NewEnviron{hide}{}
|
| 401 |
+
\let\ack\hide
|
| 402 |
+
\let\endack\endhide
|
| 403 |
+
|
| 404 |
+
% line numbers for submission
|
| 405 |
+
\RequirePackage{lineno}
|
| 406 |
+
\linenumbers
|
| 407 |
+
|
| 408 |
+
% fix incompatibilities between lineno and amsmath, if required, by
|
| 409 |
+
% transparently wrapping linenomath environments around amsmath
|
| 410 |
+
% environments
|
| 411 |
+
\AtBeginDocument{%
|
| 412 |
+
\@ifpackageloaded{amsmath}{%
|
| 413 |
+
\newcommand*\patchAmsMathEnvironmentForLineno[1]{%
|
| 414 |
+
\expandafter\let\csname old#1\expandafter\endcsname\csname #1\endcsname
|
| 415 |
+
\expandafter\let\csname oldend#1\expandafter\endcsname\csname end#1\endcsname
|
| 416 |
+
\renewenvironment{#1}%
|
| 417 |
+
{\linenomath\csname old#1\endcsname}%
|
| 418 |
+
{\csname oldend#1\endcsname\endlinenomath}%
|
| 419 |
+
}%
|
| 420 |
+
\newcommand*\patchBothAmsMathEnvironmentsForLineno[1]{%
|
| 421 |
+
\patchAmsMathEnvironmentForLineno{#1}%
|
| 422 |
+
\patchAmsMathEnvironmentForLineno{#1*}%
|
| 423 |
+
}%
|
| 424 |
+
\patchBothAmsMathEnvironmentsForLineno{equation}%
|
| 425 |
+
\patchBothAmsMathEnvironmentsForLineno{align}%
|
| 426 |
+
\patchBothAmsMathEnvironmentsForLineno{flalign}%
|
| 427 |
+
\patchBothAmsMathEnvironmentsForLineno{alignat}%
|
| 428 |
+
\patchBothAmsMathEnvironmentsForLineno{gather}%
|
| 429 |
+
\patchBothAmsMathEnvironmentsForLineno{multline}%
|
| 430 |
+
}
|
| 431 |
+
{}
|
| 432 |
+
}
|
| 433 |
+
\fi
|
| 434 |
+
\fi
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
\endinput
|
paper/neurips_2026.tex
ADDED
|
@@ -0,0 +1,493 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\documentclass{article}
|
| 2 |
+
|
| 3 |
+
% if you need to pass options to natbib, use, e.g.:
|
| 4 |
+
% \PassOptionsToPackage{numbers, compress}{natbib}
|
| 5 |
+
% before loading neurips_2026
|
| 6 |
+
|
| 7 |
+
% The authors should use one of these tracks.
|
| 8 |
+
% Before accepting by the NeurIPS conference, select one of the options below.
|
| 9 |
+
% 0. "default" for submission
|
| 10 |
+
\usepackage{neurips_2026}
|
| 11 |
+
% the "default" option is equal to the "main" option, which is used for the Main Track with double-blind reviewing.
|
| 12 |
+
% 1. "main" option is used for the Main Track
|
| 13 |
+
% \usepackage[main]{neurips_2026}
|
| 14 |
+
% 2. "position" option is used for the Position Paper Track
|
| 15 |
+
% \usepackage[position]{neurips_2026}
|
| 16 |
+
% 3. "eandd" option is used for the Evaluations & Datasets Track
|
| 17 |
+
% \usepackage[eandd]{neurips_2026}
|
| 18 |
+
% if you need to opt-in for a single-blind submission in the E&D track:
|
| 19 |
+
%\usepackage[eandd, nonanonymous]{neurips_2026}
|
| 20 |
+
% 4. "creativeai" option is used for the Creative AI Track
|
| 21 |
+
% \usepackage[creativeai]{neurips_2026}
|
| 22 |
+
% 5. "sglblindworkshop" option is used for the Workshop with single-blind reviewing
|
| 23 |
+
% \usepackage[sglblindworkshop]{neurips_2026}
|
| 24 |
+
% 6. "dblblindworkshop" option is used for the Workshop with double-blind reviewing
|
| 25 |
+
% \usepackage[dblblindworkshop]{neurips_2026}
|
| 26 |
+
|
| 27 |
+
% After being accepted, the authors should add "final" behind the track to compile a camera-ready version.
|
| 28 |
+
% 1. Main Track
|
| 29 |
+
% \usepackage[main, final]{neurips_2026}
|
| 30 |
+
% 2. Position Paper Track
|
| 31 |
+
% \usepackage[position, final]{neurips_2026}
|
| 32 |
+
% 3. Evaluations & Datasets Track
|
| 33 |
+
% \usepackage[eandd, final]{neurips_2026}
|
| 34 |
+
% 4. Creative AI Track
|
| 35 |
+
% \usepackage[creativeai, final]{neurips_2026}
|
| 36 |
+
% 5. Workshop with single-blind reviewing
|
| 37 |
+
% \usepackage[sglblindworkshop, final]{neurips_2026}
|
| 38 |
+
% 6. Workshop with double-blind reviewing
|
| 39 |
+
% \usepackage[dblblindworkshop, final]{neurips_2026}
|
| 40 |
+
% Note. For the workshop paper template, both \title{} and \workshoptitle{} are required, with the former indicating the paper title shown in the title and the latter indicating the workshop title displayed in the footnote.
|
| 41 |
+
% For workshops (5., 6.), the authors should add the name of the workshop, "\workshoptitle" command is used to set the workshop title.
|
| 42 |
+
% \workshoptitle{WORKSHOP TITLE}
|
| 43 |
+
|
| 44 |
+
% "preprint" option is used for arXiv or other preprint submissions
|
| 45 |
+
% \usepackage[preprint]{neurips_2026}
|
| 46 |
+
|
| 47 |
+
% to avoid loading the natbib package, add option nonatbib:
|
| 48 |
+
% \usepackage[nonatbib]{neurips_2026}
|
| 49 |
+
|
| 50 |
+
\usepackage[utf8]{inputenc} % allow utf-8 input
|
| 51 |
+
\usepackage[T1]{fontenc} % use 8-bit T1 fonts
|
| 52 |
+
\usepackage{hyperref} % hyperlinks
|
| 53 |
+
\usepackage{url} % simple URL typesetting
|
| 54 |
+
\usepackage{booktabs} % professional-quality tables
|
| 55 |
+
\usepackage{amsfonts} % blackboard math symbols
|
| 56 |
+
\usepackage{nicefrac} % compact symbols for 1/2, etc.
|
| 57 |
+
\usepackage{microtype} % microtypography
|
| 58 |
+
\usepackage{xcolor} % colors
|
| 59 |
+
|
| 60 |
+
% Note. For the workshop paper template, both \title{} and \workshoptitle{} are required, with the former indicating the paper title shown in the title and the latter indicating the workshop title displayed in the footnote.
|
| 61 |
+
\title{Formatting Instructions For NeurIPS 2026}
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
% The \author macro works with any number of authors. There are two commands
|
| 65 |
+
% used to separate the names and addresses of multiple authors: \And and \AND.
|
| 66 |
+
%
|
| 67 |
+
% Using \And between authors leaves it to LaTeX to determine where to break the
|
| 68 |
+
% lines. Using \AND forces a line break at that point. So, if LaTeX puts 3 of 4
|
| 69 |
+
% authors names on the first line, and the last on the second line, try using
|
| 70 |
+
% \AND instead of \And before the third author name.
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
\author{%
|
| 74 |
+
David S.~Hippocampus\thanks{Use footnote for providing further information
|
| 75 |
+
about author (webpage, alternative address)---\emph{not} for acknowledging
|
| 76 |
+
funding agencies.} \\
|
| 77 |
+
Department of Computer Science\\
|
| 78 |
+
Cranberry-Lemon University\\
|
| 79 |
+
Pittsburgh, PA 15213 \\
|
| 80 |
+
\texttt{hippo@cs.cranberry-lemon.edu} \\
|
| 81 |
+
% examples of more authors
|
| 82 |
+
% \And
|
| 83 |
+
% Coauthor \\
|
| 84 |
+
% Affiliation \\
|
| 85 |
+
% Address \\
|
| 86 |
+
% \texttt{email} \\
|
| 87 |
+
% \AND
|
| 88 |
+
% Coauthor \\
|
| 89 |
+
% Affiliation \\
|
| 90 |
+
% Address \\
|
| 91 |
+
% \texttt{email} \\
|
| 92 |
+
% \And
|
| 93 |
+
% Coauthor \\
|
| 94 |
+
% Affiliation \\
|
| 95 |
+
% Address \\
|
| 96 |
+
% \texttt{email} \\
|
| 97 |
+
% \And
|
| 98 |
+
% Coauthor \\
|
| 99 |
+
% Affiliation \\
|
| 100 |
+
% Address \\
|
| 101 |
+
% \texttt{email} \\
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
\begin{document}
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
\maketitle
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
\begin{abstract}
|
| 112 |
+
The abstract paragraph should be indented \nicefrac{1}{2}~inch (3~picas) on both the left- and right-hand margins. Use 10~point type, with a vertical spacing (leading) of 11~points. The word \textbf{Abstract} must be centered, bold, and in point size 12. Two line spaces precede the abstract. The abstract must be limited to one paragraph.
|
| 113 |
+
\end{abstract}
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
\section{Submission of papers to NeurIPS 2026}
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
Please read the instructions below carefully and follow them faithfully.
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
\subsection{Style}
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
Papers to be submitted to NeurIPS 2026 must be prepared according to the
|
| 127 |
+
instructions presented here. Papers may only be up to {\bf nine} pages long, including figures. \textbf{Papers that exceed the page limit will not be reviewed (or in any other way considered) for presentation at the conference.}
|
| 128 |
+
Additional pages \emph{containing acknowledgments, references, checklist, and optional technical appendices} do not count as content pages.
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
The margins in 2026 are the same as those in previous years.
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
Authors are required to use the NeurIPS \LaTeX{} style files obtainable at the NeurIPS website as indicated below. Please make sure you use the current files and not previous versions. Tweaking the style files may be grounds for desk rejection.
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
\subsection{Retrieval of style files}
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
The style files for NeurIPS and other conference information are available on the website at
|
| 141 |
+
\begin{center}
|
| 142 |
+
\url{https://neurips.cc}.
|
| 143 |
+
\end{center}
|
| 144 |
+
% The file \verb+neurips_2026.pdf+ contains these instructions and illustrates the various formatting requirements your NeurIPS paper must satisfy.
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
The only supported style file for NeurIPS 2026 is \verb+neurips_2026.sty+, rewritten for \LaTeXe{}. \textbf{Previous style files for \LaTeX{} 2.09, Microsoft Word, and RTF are no longer supported.}
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
The \LaTeX{} style file contains three optional arguments:
|
| 151 |
+
\begin{itemize}
|
| 152 |
+
\item \verb+final+, which creates a camera-ready copy,
|
| 153 |
+
\item \verb+preprint+, which creates a preprint for submission to, e.g., arXiv,
  \item \verb+nonatbib+, which will not load the \verb+natbib+ package for you in case of package clash.
|
| 154 |
+
\end{itemize}
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
\paragraph{Preprint option}
|
| 158 |
+
If you wish to post a preprint of your work online, e.g., on arXiv, using the NeurIPS style, please use the \verb+preprint+ option. This will create a non-anonymized version of your work with the text ``Preprint.'' in the footer. This version may be distributed as you see fit, as long as you do not say which conference it was submitted to. Please \textbf{do not} use the \verb+final+ option, which should \textbf{only} be used for papers accepted to NeurIPS.
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
At submission time, please omit the \verb+final+ and \verb+preprint+ options. This will anonymize your submission and add line numbers to aid review. Please do \emph{not} refer to these line numbers in your paper as they will be removed during generation of camera-ready copies.
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
The file \verb+neurips_2026.tex+ may be used as a ``shell'' for writing your paper. All you have to do is replace the author, title, abstract, and text of the paper with your own.
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
The formatting instructions contained in these style files are summarized in Sections \ref{gen_inst}, \ref{headings}, and \ref{others} below.
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
\section{General formatting instructions}
|
| 171 |
+
\label{gen_inst}
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
The text must be confined within a rectangle 5.5~inches (33~picas) wide and
|
| 175 |
+
9~inches (54~picas) long. The left margin is 1.5~inch (9~picas). Use 10~point
|
| 176 |
+
type with a vertical spacing (leading) of 11~points. Times New Roman is the
|
| 177 |
+
preferred typeface throughout, and will be selected for you by default.
|
| 178 |
+
Paragraphs are separated by \nicefrac{1}{2}~line space (5.5 points), with no
|
| 179 |
+
indentation.
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
The paper title should be 17~point, initial caps/lower case, bold, centered
|
| 183 |
+
between two horizontal rules. The top rule should be 4~points thick and the
|
| 184 |
+
bottom rule should be 1~point thick. Allow \nicefrac{1}{4}~inch space above and
|
| 185 |
+
below the title to rules. All pages should start at 1~inch (6~picas) from the
|
| 186 |
+
top of the page.
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
For the final version, authors' names are set in boldface, and each name is
|
| 190 |
+
centered above the corresponding address. The lead author's name is to be listed
|
| 191 |
+
first (left-most), and the co-authors' names (if different address) are set to
|
| 192 |
+
follow. If there is only one co-author, list both author and co-author side by
|
| 193 |
+
side.
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
Please pay special attention to the instructions in Section \ref{others}
|
| 197 |
+
regarding figures, tables, acknowledgments, and references.
|
| 198 |
+
|
| 199 |
+
\section{Headings: first level}
|
| 200 |
+
\label{headings}
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
All headings should be lower case (except for first word and proper nouns),
|
| 204 |
+
flush left, and bold.
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
First-level headings should be in 12-point type.
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
\subsection{Headings: second level}
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
Second-level headings should be in 10-point type.
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
\subsubsection{Headings: third level}
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
Third-level headings should be in 10-point type.
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
\paragraph{Paragraphs}
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
There is also a \verb+\paragraph+ command available, which sets the heading in
|
| 226 |
+
bold, flush left, and inline with the text, with the heading followed by 1\,em
|
| 227 |
+
of space.
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
\section{Citations, figures, tables, references}
|
| 231 |
+
\label{others}
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
These instructions apply to everyone.
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
\subsection{Citations within the text}
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
The \verb+natbib+ package will be loaded for you by default. Citations may be
|
| 241 |
+
author/year or numeric, as long as you maintain internal consistency. As to the
|
| 242 |
+
format of the references themselves, any style is acceptable as long as it is
|
| 243 |
+
used consistently.
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
The documentation for \verb+natbib+ may be found at
|
| 247 |
+
\begin{center}
|
| 248 |
+
\url{http://mirrors.ctan.org/macros/latex/contrib/natbib/natnotes.pdf}
|
| 249 |
+
\end{center}
|
| 250 |
+
Of note is the command \verb+\citet+, which produces citations appropriate for
|
| 251 |
+
use in inline text. For example,
|
| 252 |
+
\begin{verbatim}
|
| 253 |
+
\citet{hasselmo} investigated\dots
|
| 254 |
+
\end{verbatim}
|
| 255 |
+
produces
|
| 256 |
+
\begin{quote}
|
| 257 |
+
Hasselmo, et al.\ (1995) investigated\dots
|
| 258 |
+
\end{quote}
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
If you wish to load the \verb+natbib+ package with options, you may add the
|
| 262 |
+
following before loading the \verb+neurips_2026+ package:
|
| 263 |
+
\begin{verbatim}
|
| 264 |
+
\PassOptionsToPackage{options}{natbib}
|
| 265 |
+
\end{verbatim}
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
If \verb+natbib+ clashes with another package you load, you can add the optional
|
| 269 |
+
argument \verb+nonatbib+ when loading the style file:
|
| 270 |
+
\begin{verbatim}
|
| 271 |
+
\usepackage[nonatbib]{neurips_2026}
|
| 272 |
+
\end{verbatim}
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
As submission is double blind, refer to your own published work in the third
|
| 276 |
+
person. That is, use ``In the previous work of Jones et al.\ [4],'' not ``In our
|
| 277 |
+
previous work [4].'' If you cite your other papers that are not widely available
|
| 278 |
+
(e.g., a journal paper under review), use anonymous author names in the
|
| 279 |
+
citation, e.g., an author of the form ``A.\ Anonymous'' and include a copy of the anonymized paper in the supplementary material.
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
\subsection{Footnotes}
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
Footnotes should be used sparingly. If you do require a footnote, indicate
|
| 286 |
+
footnotes with a number\footnote{Sample of the first footnote.} in the
|
| 287 |
+
text. Place the footnotes at the bottom of the page on which they appear.
|
| 288 |
+
Precede the footnote with a horizontal rule of 2~inches (12~picas).
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
Note that footnotes are properly typeset \emph{after} punctuation
|
| 292 |
+
marks.\footnote{As in this example.}
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
\subsection{Figures}
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
\begin{figure}
|
| 299 |
+
\centering
|
| 300 |
+
\fbox{\rule[-.5cm]{0cm}{4cm} \rule[-.5cm]{4cm}{0cm}}
|
| 301 |
+
\caption{Sample figure caption. Explain what the figure shows and add a key take-away message to the caption.}
|
| 302 |
+
\end{figure}
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
All artwork must be neat, clean, and legible. Lines should be dark enough for
|
| 306 |
+
reproduction purposes. The figure number and caption always appear after the
|
| 307 |
+
figure. Place one line space before the figure caption and one line space after
|
| 308 |
+
the figure. The figure caption should be lower case (except for the first word and proper nouns); figures are numbered consecutively.
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
You may use color figures. However, it is best for the figure captions and the
|
| 312 |
+
paper body to be legible if the paper is printed in either black/white or in
|
| 313 |
+
color.
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
\subsection{Tables}
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
All tables must be centered, neat, clean, and legible. The table number and
|
| 320 |
+
title always appear before the table. See Table~\ref{sample-table}.
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
Place one line space before the table title, one line space after the
|
| 324 |
+
table title, and one line space after the table. The table title must
|
| 325 |
+
be lower case (except for the first word and proper nouns); tables are
|
| 326 |
+
numbered consecutively.
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
Note that publication-quality tables \emph{do not contain vertical rules}. We
|
| 330 |
+
strongly suggest the use of the \verb+booktabs+ package, which allows for
|
| 331 |
+
typesetting high-quality, professional tables:
|
| 332 |
+
\begin{center}
|
| 333 |
+
\url{https://www.ctan.org/pkg/booktabs}
|
| 334 |
+
\end{center}
|
| 335 |
+
This package was used to typeset Table~\ref{sample-table}.
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
\begin{table}
|
| 339 |
+
\caption{Sample table caption. Explain what the table shows and add a key take-away message to the caption.}
|
| 340 |
+
\label{sample-table}
|
| 341 |
+
\centering
|
| 342 |
+
\begin{tabular}{lll}
|
| 343 |
+
\toprule
|
| 344 |
+
\multicolumn{2}{c}{Part} \\
|
| 345 |
+
\cmidrule(r){1-2}
|
| 346 |
+
Name & Description & Size ($\mu$m) \\
|
| 347 |
+
\midrule
|
| 348 |
+
Dendrite & Input terminal & $\approx$100 \\
|
| 349 |
+
Axon & Output terminal & $\approx$10 \\
|
| 350 |
+
Soma & Cell body & up to $10^6$ \\
|
| 351 |
+
\bottomrule
|
| 352 |
+
\end{tabular}
|
| 353 |
+
\end{table}
|
| 354 |
+
|
| 355 |
+
\subsection{Math}
|
| 356 |
+
Note that display math in bare TeX commands will not create correct line numbers for submission. Please use LaTeX (or AMSTeX) commands for unnumbered display math. (You really shouldn't be using \$\$ anyway; see \url{https://tex.stackexchange.com/questions/503/why-is-preferable-to} and \url{https://tex.stackexchange.com/questions/40492/what-are-the-differences-between-align-equation-and-displaymath} for more information.)
|
| 357 |
+
|
| 358 |
+
\subsection{Final instructions}
|
| 359 |
+
|
| 360 |
+
Do not change any aspects of the formatting parameters in the style files. In
|
| 361 |
+
particular, do not modify the width or length of the rectangle the text should
|
| 362 |
+
fit into, and do not change font sizes. Please note that pages should be
|
| 363 |
+
numbered.
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
\section{Preparing PDF files}
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
Please prepare submission files with paper size ``US Letter,'' and not, for
|
| 370 |
+
example, ``A4.''
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
Fonts were the main cause of problems in the past years. Your PDF file must only
|
| 374 |
+
contain Type 1 or Embedded TrueType fonts. Here are a few instructions to
|
| 375 |
+
achieve this.
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
\begin{itemize}
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
\item You should directly generate PDF files using \verb+pdflatex+.
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
\item You can check which fonts a PDF file uses. In Acrobat Reader, select the
|
| 385 |
+
menu Files$>$Document Properties$>$Fonts and select Show All Fonts. You can
|
| 386 |
+
also use the program \verb+pdffonts+ which comes with \verb+xpdf+ and is
|
| 387 |
+
available out-of-the-box on most Linux machines.
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
\item \verb+xfig+ ``patterned'' shapes are implemented with bitmap fonts. Use
|
| 391 |
+
"solid" shapes instead.
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
\item The \verb+\bbold+ package almost always uses bitmap fonts. You should use
|
| 395 |
+
the equivalent AMS Fonts:
|
| 396 |
+
\begin{verbatim}
|
| 397 |
+
\usepackage{amsfonts}
|
| 398 |
+
\end{verbatim}
|
| 399 |
+
followed by, e.g., \verb+\mathbb{R}+, \verb+\mathbb{N}+, or \verb+\mathbb{C}+
|
| 400 |
+
for $\mathbb{R}$, $\mathbb{N}$ or $\mathbb{C}$. You can also use the following
|
| 401 |
+
workaround for reals, natural and complex:
|
| 402 |
+
\begin{verbatim}
|
| 403 |
+
\newcommand{\RR}{I\!\!R} %real numbers
|
| 404 |
+
\newcommand{\Nat}{I\!\!N} %natural numbers
|
| 405 |
+
\newcommand{\CC}{I\!\!\!\!C} %complex numbers
|
| 406 |
+
\end{verbatim}
|
| 407 |
+
Note that \verb+amsfonts+ is automatically loaded by the \verb+amssymb+ package.
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
\end{itemize}
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
If your file contains type 3 fonts or non embedded TrueType fonts, we will ask
|
| 414 |
+
you to fix it.
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
\subsection{Margins in \LaTeX{}}
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
Most of the margin problems come from figures positioned by hand using
|
| 421 |
+
\verb+\special+ or other commands. We suggest using the command
|
| 422 |
+
\verb+\includegraphics+ from the \verb+graphicx+ package. Always specify the
|
| 423 |
+
figure width as a multiple of the line width as in the example below:
|
| 424 |
+
\begin{verbatim}
|
| 425 |
+
\usepackage[pdftex]{graphicx} ...
|
| 426 |
+
\includegraphics[width=0.8\linewidth]{myfile.pdf}
|
| 427 |
+
\end{verbatim}
|
| 428 |
+
See Section 4.4 in the graphics bundle documentation
|
| 429 |
+
(\url{http://mirrors.ctan.org/macros/latex/required/graphics/grfguide.pdf})
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
A number of width problems arise when \LaTeX{} cannot properly hyphenate a
|
| 433 |
+
line. Please give LaTeX hyphenation hints using the \verb+\-+ command when
|
| 434 |
+
necessary.
|
| 435 |
+
|
| 436 |
+
\begin{ack}
|
| 437 |
+
Use unnumbered first level headings for the acknowledgments. All acknowledgments
|
| 438 |
+
go at the end of the paper before the list of references. Moreover, you are required to declare
|
| 439 |
+
funding (financial activities supporting the submitted work) and competing interests (related financial activities outside the submitted work).
|
| 440 |
+
More information about this disclosure can be found at: \url{https://neurips.cc/Conferences/2026/PaperInformation/FundingDisclosure}.
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
Do {\bf not} include this section in the anonymized submission, only in the final paper. You can use the \texttt{ack} environment provided in the style file to automatically hide this section in the anonymized submission.
|
| 444 |
+
\end{ack}
|
| 445 |
+
|
| 446 |
+
\section*{References}
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
References follow the acknowledgments in the camera-ready paper. Use unnumbered first-level heading for
|
| 450 |
+
the references. Any choice of citation style is acceptable as long as you are
|
| 451 |
+
consistent. It is permissible to reduce the font size to \verb+small+ (9 point)
|
| 452 |
+
when listing the references.
|
| 453 |
+
Note that the Reference section does not count towards the page limit.
|
| 454 |
+
\medskip
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
{
|
| 458 |
+
\small
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
[1] Alexander, J.A.\ \& Mozer, M.C.\ (1995) Template-based algorithms for
|
| 462 |
+
connectionist rule extraction. In G.\ Tesauro, D.S.\ Touretzky and T.K.\ Leen
|
| 463 |
+
(eds.), {\it Advances in Neural Information Processing Systems 7},
|
| 464 |
+
pp.\ 609--616. Cambridge, MA: MIT Press.
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
[2] Bower, J.M.\ \& Beeman, D.\ (1995) {\it The Book of GENESIS: Exploring
|
| 468 |
+
Realistic Neural Models with the GEneral NEural SImulation System.} New York:
|
| 469 |
+
TELOS/Springer--Verlag.
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
[3] Hasselmo, M.E., Schnell, E.\ \& Barkai, E.\ (1995) Dynamics of learning and
|
| 473 |
+
recall at excitatory recurrent synapses and cholinergic modulation in rat
|
| 474 |
+
hippocampal region CA3. {\it Journal of Neuroscience} {\bf 15}(7):5249-5262.
|
| 475 |
+
}
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
| 479 |
+
|
| 480 |
+
\appendix
|
| 481 |
+
|
| 482 |
+
\section{Technical appendices and supplementary material}
|
| 483 |
+
Technical appendices with additional results, figures, graphs, and proofs may be submitted with the paper submission before the full submission deadline (see above). You can upload a ZIP file for videos or code, but do not upload a separate PDF file for the appendix. There is no page limit for the technical appendices.
|
| 484 |
+
|
| 485 |
+
Note: Think of the appendix as ``optional reading'' for reviewers. The paper must be able to stand alone without the appendix; for example, adding critical experiments that support the main claims to an appendix is inappropriate.
|
| 486 |
+
|
| 487 |
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
| 488 |
+
|
| 489 |
+
\newpage
|
| 490 |
+
\input{checklist.tex}
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
\end{document}
|
paper/references.bib
ADDED
|
@@ -0,0 +1,463 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
% ===== Negative Results & Publication Bias =====
|
| 2 |
+
|
| 3 |
+
@article{fanelli2012negative,
|
| 4 |
+
title={Negative results are disappearing from most disciplines and countries},
|
| 5 |
+
author={Fanelli, Daniele},
|
| 6 |
+
journal={Scientometrics},
|
| 7 |
+
volume={90},
|
| 8 |
+
number={3},
|
| 9 |
+
pages={891--904},
|
| 10 |
+
year={2012},
|
| 11 |
+
publisher={Springer}
|
| 12 |
+
}
|
| 13 |
+
|
| 14 |
+
@article{mlinaric2017dealing,
|
| 15 |
+
title={Dealing with the positive publication bias: Why you should really publish your negative results},
|
| 16 |
+
author={Mlinaric, Ana and Horvat, Martina and Smolcic, Vesna Supak},
|
| 17 |
+
journal={Biochemia medica},
|
| 18 |
+
volume={27},
|
| 19 |
+
number={3},
|
| 20 |
+
pages={030201},
|
| 21 |
+
year={2017}
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
% ===== DTI Benchmarks & Databases =====
|
| 25 |
+
|
| 26 |
+
@article{gaulton2017chembl,
|
| 27 |
+
title={{ChEMBL}: a large-scale bioactivity database for drug discovery},
|
| 28 |
+
author={Gaulton, Anna and Hersey, Anne and Nowotka, Micha{\l} and Bento, A Patr{\'\i}cia and Chambers, Jon and Mendez, David and Mutowo, Prudence and Atkinson, Francis and Bellis, Louisa J and Cibri{\'a}n-Uhalte, Elena and others},
|
| 29 |
+
journal={Nucleic Acids Research},
|
| 30 |
+
volume={45},
|
| 31 |
+
number={D1},
|
| 32 |
+
pages={D986--D994},
|
| 33 |
+
year={2017},
|
| 34 |
+
publisher={Oxford University Press}
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
@article{kim2023pubchem,
|
| 38 |
+
title={{PubChem} 2023 update},
|
| 39 |
+
author={Kim, Sunghwan and Chen, Jie and Cheng, Tiejun and Gindulyte, Asta and He, Jia and He, Siqian and Li, Qingliang and Shoemaker, Benjamin A and Thiessen, Paul A and Yu, Bo and others},
|
| 40 |
+
journal={Nucleic Acids Research},
|
| 41 |
+
volume={51},
|
| 42 |
+
number={D1},
|
| 43 |
+
pages={D1373--D1380},
|
| 44 |
+
year={2023},
|
| 45 |
+
publisher={Oxford University Press}
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
@article{gilson2016bindingdb,
|
| 49 |
+
title={{BindingDB} in 2015: a public database for medicinal chemistry, computational chemistry and systems pharmacology},
|
| 50 |
+
author={Gilson, Michael K and Liu, Tiqing and Baitaluk, Michael and Nicola, George and Hwang, Linda and Chong, Jenny},
|
| 51 |
+
journal={Nucleic Acids Research},
|
| 52 |
+
volume={44},
|
| 53 |
+
number={D1},
|
| 54 |
+
pages={D1045--D1053},
|
| 55 |
+
year={2016},
|
| 56 |
+
publisher={Oxford University Press}
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
@article{davis2011comprehensive,
|
| 60 |
+
title={Comprehensive analysis of kinase inhibitor selectivity},
|
| 61 |
+
author={Davis, Mindy I and Hunt, Jeremy P and Herrgard, Sune and Ciceri, Pietro and Wodicka, Lisa M and Pallares, Gabriel and Hocker, Michael and Treiber, Daniel K and Zarrinkar, Patrick P},
|
| 62 |
+
journal={Nature Biotechnology},
|
| 63 |
+
volume={29},
|
| 64 |
+
number={11},
|
| 65 |
+
pages={1046--1051},
|
| 66 |
+
year={2011},
|
| 67 |
+
publisher={Nature Publishing Group}
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
@article{huang2021therapeutics,
|
| 71 |
+
title={Therapeutics Data Commons: Machine Learning Datasets and Tasks for Drug Discovery and Development},
|
| 72 |
+
author={Huang, Kexin and Fu, Tianfan and Gao, Wenhao and Zhao, Yue and Roohani, Yusuf and Leskovec, Jure and Coley, Connor W and Xiao, Cao and Sun, Jimeng and Zitnik, Marinka},
|
| 73 |
+
journal={Proceedings of NeurIPS Datasets and Benchmarks},
|
| 74 |
+
year={2021}
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
@article{mysinger2012dude,
|
| 78 |
+
title={Directory of useful decoys, enhanced ({DUD-E}): better ligands and decoys for better benchmarking},
|
| 79 |
+
author={Mysinger, Michael M and Carchia, Michael and Irwin, John J and Shoichet, Brian K},
|
| 80 |
+
journal={Journal of Medicinal Chemistry},
|
| 81 |
+
volume={55},
|
| 82 |
+
number={14},
|
| 83 |
+
pages={6582--6594},
|
| 84 |
+
year={2012},
|
| 85 |
+
publisher={ACS Publications}
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
@article{wu2018moleculenet,
|
| 89 |
+
title={{MoleculeNet}: a benchmark for molecular machine learning},
|
| 90 |
+
author={Wu, Zhenqin and Ramsundar, Bharath and Feinberg, Evan N and Gomes, Joseph and Geniesse, Caleb and Pappu, Aneesh S and Leswing, Karl and Pande, Vijay},
|
| 91 |
+
journal={Chemical Science},
|
| 92 |
+
volume={9},
|
| 93 |
+
number={2},
|
| 94 |
+
pages={513--530},
|
| 95 |
+
year={2018},
|
| 96 |
+
publisher={Royal Society of Chemistry}
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
@article{tran2020litpcba,
|
| 100 |
+
title={Lit-{PCBA}: An unbiased data set for machine learning and virtual screening},
|
| 101 |
+
author={Tran-Nguyen, Viet-Khoa and Jacquemard, Christophe and Rognan, Didier},
|
| 102 |
+
journal={Journal of Chemical Information and Modeling},
|
| 103 |
+
volume={60},
|
| 104 |
+
number={9},
|
| 105 |
+
pages={4263--4273},
|
| 106 |
+
year={2020},
|
| 107 |
+
publisher={ACS Publications}
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
@article{li2025evidti,
|
| 111 |
+
title={Negative is positive: on the role of negative evidence in drug-target interaction prediction},
|
| 112 |
+
author={Li, Jianmin and others},
|
| 113 |
+
journal={Briefings in Bioinformatics},
|
| 114 |
+
year={2025},
|
| 115 |
+
note={To appear}
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
@inproceedings{volkov2025welqrate,
|
| 119 |
+
title={{WelQrate}: Defining the gold standard in small molecule drug discovery benchmarking},
|
| 120 |
+
author={Volkov, Maxim and Fl{\"o}ge, Joseph and Stolte, Markus and others},
|
| 121 |
+
booktitle={Advances in Neural Information Processing Systems},
|
| 122 |
+
year={2025}
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
% ===== DTI Models =====
|
| 126 |
+
|
| 127 |
+
@article{ozturk2018deepdta,
|
| 128 |
+
title={{DeepDTA}: deep drug--target binding affinity prediction},
|
| 129 |
+
author={{\"O}zt{\"u}rk, Hakime and {\"O}zg{\"u}r, Arzucan and Ozkirimli, Elif},
|
| 130 |
+
journal={Bioinformatics},
|
| 131 |
+
volume={34},
|
| 132 |
+
number={17},
|
| 133 |
+
pages={i821--i829},
|
| 134 |
+
year={2018},
|
| 135 |
+
publisher={Oxford University Press}
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
@article{nguyen2021graphdta,
|
| 139 |
+
title={{GraphDTA}: predicting drug--target binding affinity with graph neural networks},
|
| 140 |
+
author={Nguyen, Thin and Le, Hang and Quinn, Thomas P and Nguyen, Tri and Le, Thuc Duy and Venkatesh, Svetha},
|
| 141 |
+
journal={Bioinformatics},
|
| 142 |
+
volume={37},
|
| 143 |
+
number={8},
|
| 144 |
+
pages={1140--1147},
|
| 145 |
+
year={2021},
|
| 146 |
+
publisher={Oxford University Press}
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
@article{bai2023drugban,
|
| 150 |
+
title={Interpretable bilinear attention network with domain adaptation improves drug--target prediction},
|
| 151 |
+
author={Bai, Peizhen and Miljkovi{\'c}, Filip and John, Bino and Lu, Haiping},
|
| 152 |
+
journal={Nature Machine Intelligence},
|
| 153 |
+
volume={5},
|
| 154 |
+
number={2},
|
| 155 |
+
pages={126--136},
|
| 156 |
+
year={2023},
|
| 157 |
+
publisher={Nature Publishing Group}
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
% ===== Clinical Trial Databases =====
|
| 161 |
+
|
| 162 |
+
@article{tasneem2012aact,
|
| 163 |
+
title={The database for aggregate analysis of {ClinicalTrials.gov} ({AACT}) and subsequent regrouping by clinical specialty},
|
| 164 |
+
author={Tasneem, Asba and Aberle, Laura and Ananth, Hari and Chakraborty, Swati and Chiswell, Karen and McCourt, Brian J and Pietrobon, Ricardo},
|
| 165 |
+
journal={PLoS ONE},
|
| 166 |
+
volume={7},
|
| 167 |
+
number={3},
|
| 168 |
+
pages={e33677},
|
| 169 |
+
year={2012}
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
@article{fu2022hint,
|
| 173 |
+
title={{HINT}: Hierarchical interaction network for clinical trial outcome prediction},
|
| 174 |
+
author={Fu, Tianfan and Huang, Kexin and Xiao, Cao and Glass, Lucas M and Sun, Jimeng},
|
| 175 |
+
journal={Patterns},
|
| 176 |
+
volume={3},
|
| 177 |
+
number={4},
|
| 178 |
+
pages={100445},
|
| 179 |
+
year={2022},
|
| 180 |
+
publisher={Elsevier}
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
@article{siah2021cto,
|
| 184 |
+
title={Predicting drug approvals: the {Novartis} data science and artificial intelligence challenge},
|
| 185 |
+
author={Siah, Kien Wei and Kelley, Nicholas W and Engstrom, Steinar and Abi Jaoude, Joseph and Cook, Andrew R and Lo, Andrew W},
|
| 186 |
+
journal={Patterns},
|
| 187 |
+
volume={2},
|
| 188 |
+
number={8},
|
| 189 |
+
pages={100312},
|
| 190 |
+
year={2021},
|
| 191 |
+
publisher={Elsevier}
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
@article{shi2024safety,
|
| 195 |
+
title={Safety and efficacy outcomes in clinical trials with negative results},
|
| 196 |
+
author={Shi, Yu and Du, Jingcheng},
|
| 197 |
+
journal={Drug Safety},
|
| 198 |
+
year={2024},
|
| 199 |
+
publisher={Springer}
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
@misc{opentargets2024,
|
| 203 |
+
title={Open Targets Platform},
|
| 204 |
+
author={{Open Targets Consortium}},
|
| 205 |
+
year={2024},
|
| 206 |
+
howpublished={\url{https://platform.opentargets.org/}}
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
% ===== PPI Databases =====
|
| 210 |
+
|
| 211 |
+
@article{luck2020huri,
|
| 212 |
+
title={A reference map of the human binary protein interactome},
|
| 213 |
+
author={Luck, Katja and Kim, Dae-Kyum and Lambourne, Luke and Spirohn, Kerstin and Begg, Bridget E and Bian, Wenting and Brignall, Ruth and Cafarelli, Tiziana and Campos-Laborie, Francisco J and others},
|
| 214 |
+
journal={Nature},
|
| 215 |
+
volume={580},
|
| 216 |
+
number={7803},
|
| 217 |
+
pages={402--408},
|
| 218 |
+
year={2020},
|
| 219 |
+
publisher={Nature Publishing Group}
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
@article{orchard2014intact,
|
| 223 |
+
title={The {MIntAct} project---{IntAct} as a common curation platform for 11 molecular interaction databases},
|
| 224 |
+
author={Orchard, Sandra and Ammari, Mais and Aranda, Bruno and Breuza, Lionel and Briganti, Leonardo and Broackes-Carter, Fiona and others},
|
| 225 |
+
journal={Nucleic Acids Research},
|
| 226 |
+
volume={42},
|
| 227 |
+
number={D1},
|
| 228 |
+
pages={D358--D363},
|
| 229 |
+
year={2014},
|
| 230 |
+
publisher={Oxford University Press}
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
@article{drew2021humap,
|
| 234 |
+
title={hu.{MAP} 2.0: integration of over 15,000 proteomic experiments builds a global compendium of human multiprotein assemblies},
|
| 235 |
+
author={Drew, Kevin and Wallingford, John B and Marcotte, Edward M},
|
| 236 |
+
journal={Molecular Systems Biology},
|
| 237 |
+
volume={17},
|
| 238 |
+
number={5},
|
| 239 |
+
pages={e10016},
|
| 240 |
+
year={2021},
|
| 241 |
+
publisher={EMBO Press}
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
@article{szklarczyk2023string,
|
| 245 |
+
title={{STRING} v12.0: protein-protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
|
| 246 |
+
author={Szklarczyk, Damian and Kirsch, Rebecca and Koutrouli, Mikaela and Nastou, Katerina and Mehryary, Farrokh and Hachilif, Radja and Gable, Annika L and Fang, Tao and Doncheva, Nadezhda T and Pyysalo, Sampo and others},
|
| 247 |
+
journal={Nucleic Acids Research},
|
| 248 |
+
volume={51},
|
| 249 |
+
number={D1},
|
| 250 |
+
pages={D483--D489},
|
| 251 |
+
year={2023},
|
| 252 |
+
publisher={Oxford University Press}
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
@article{uniprot2023,
|
| 256 |
+
title={{UniProt}: the Universal Protein Knowledgebase in 2023},
|
| 257 |
+
author={{UniProt Consortium}},
|
| 258 |
+
journal={Nucleic Acids Research},
|
| 259 |
+
volume={51},
|
| 260 |
+
number={D1},
|
| 261 |
+
pages={D523--D531},
|
| 262 |
+
year={2023}
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
% ===== LLM Benchmarks =====
|
| 266 |
+
|
| 267 |
+
@article{mirza2024chembench,
|
| 268 |
+
title={{ChemBench}: A large-scale benchmark for chemical reasoning in language models},
|
| 269 |
+
author={Mirza, Adrian and others},
|
| 270 |
+
journal={Advances in Neural Information Processing Systems},
|
| 271 |
+
year={2024}
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
@article{fang2024molinstructions,
|
| 275 |
+
title={Mol-Instructions: A large-scale biomolecular instruction dataset for large language models},
|
| 276 |
+
author={Fang, Yin and Liang, Xiaozhuo and Zhang, Ningyu and Liu, Kangwei and Huang, Rui and Chen, Zhuo and Fan, Xiaohui and Chen, Huajun},
|
| 277 |
+
journal={Proceedings of ICLR},
|
| 278 |
+
year={2024}
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
@article{jin2021medqa,
|
| 282 |
+
title={What disease does this patient have? {A} large-scale open domain question answering dataset from medical exams},
|
| 283 |
+
author={Jin, Di and Pan, Eileen and Oufattole, Nassim and Weng, Wei-Hung and Fang, Hanyi and Szolovits, Peter},
|
| 284 |
+
journal={Applied Sciences},
|
| 285 |
+
volume={11},
|
| 286 |
+
number={14},
|
| 287 |
+
pages={6421},
|
| 288 |
+
year={2021}
|
| 289 |
+
}
|
| 290 |
+
|
| 291 |
+
@article{laurent2024labbench,
|
| 292 |
+
title={{LAB-Bench}: Measuring capabilities of language models for biology research},
|
| 293 |
+
author={Laurent, Jon M and Gershon, Jo{\~a}o and others},
|
| 294 |
+
journal={arXiv preprint arXiv:2407.10362},
|
| 295 |
+
year={2024}
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
% ===== LLM Models =====
|
| 299 |
+
|
| 300 |
+
@article{dubey2024llama3,
|
| 301 |
+
title={The {Llama} 3 herd of models},
|
| 302 |
+
author={Dubey, Abhimanyu and others},
|
| 303 |
+
journal={arXiv preprint arXiv:2407.21783},
|
| 304 |
+
year={2024}
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
@article{yang2024qwen2,
|
| 308 |
+
title={{Qwen2} technical report},
|
| 309 |
+
author={Yang, An and others},
|
| 310 |
+
journal={arXiv preprint arXiv:2407.10671},
|
| 311 |
+
year={2024}
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
@misc{openai2024gpt4o,
|
| 315 |
+
title={{GPT-4o} system card},
|
| 316 |
+
author={{OpenAI}},
|
| 317 |
+
year={2024},
|
| 318 |
+
howpublished={\url{https://openai.com/index/gpt-4o-system-card/}}
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
@misc{google2025gemini,
|
| 322 |
+
title={{Gemini} 2.5 Flash},
|
| 323 |
+
author={{Google DeepMind}},
|
| 324 |
+
year={2025},
|
| 325 |
+
howpublished={\url{https://deepmind.google/technologies/gemini/}}
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
@misc{anthropic2025claude,
|
| 329 |
+
title={Claude {Haiku} 4.5 Model Card},
|
| 330 |
+
author={{Anthropic}},
|
| 331 |
+
year={2025},
|
| 332 |
+
howpublished={\url{https://www.anthropic.com}}
|
| 333 |
+
}
|
| 334 |
+
|
| 335 |
+
% ===== Methodology =====
|
| 336 |
+
|
| 337 |
+
@article{gebru2021datasheets,
|
| 338 |
+
title={Datasheets for Datasets},
|
| 339 |
+
author={Gebru, Timnit and Morgenstern, Jamie and Vecchione, Briana and Vaughan, Jennifer Wortman and Wallach, Hanna and III, Hal Daum{\'e} and Crawford, Kate},
|
| 340 |
+
journal={Communications of the ACM},
|
| 341 |
+
volume={64},
|
| 342 |
+
number={12},
|
| 343 |
+
pages={86--92},
|
| 344 |
+
year={2021}
|
| 345 |
+
}
|
| 346 |
+
|
| 347 |
+
@inproceedings{akhtar2024croissant,
|
| 348 |
+
title={Croissant: A Metadata Format for {ML}-Ready Datasets},
|
| 349 |
+
author={Akhtar, Mubashara and Benjelloun, Omar and Conforti, Costanza and van der Maaten, Laurens and others},
|
| 350 |
+
booktitle={Proceedings of KDD},
|
| 351 |
+
year={2024}
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
@inproceedings{chen2016xgboost,
|
| 355 |
+
title={{XGBoost}: A scalable tree boosting system},
|
| 356 |
+
author={Chen, Tianqi and Guestrin, Carlos},
|
| 357 |
+
booktitle={Proceedings of KDD},
|
| 358 |
+
pages={785--794},
|
| 359 |
+
year={2016}
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
@article{karypis1998metis,
|
| 363 |
+
title={A fast and high quality multilevel scheme for partitioning irregular graphs},
|
| 364 |
+
author={Karypis, George and Kumar, Vipin},
|
| 365 |
+
journal={SIAM Journal on Scientific Computing},
|
| 366 |
+
volume={20},
|
| 367 |
+
number={1},
|
| 368 |
+
pages={359--392},
|
| 369 |
+
year={1998}
|
| 370 |
+
}
|
| 371 |
+
|
| 372 |
+
% ===== Contamination & Evaluation =====
|
| 373 |
+
|
| 374 |
+
@article{sainz2024contamination,
|
| 375 |
+
title={Data contamination report from the 2024 {NAACL} workshop},
|
| 376 |
+
author={Sainz, Oscar and others},
|
| 377 |
+
journal={arXiv preprint},
|
| 378 |
+
year={2024}
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
@article{balloccu2024leak,
|
| 382 |
+
title={Leak, cheat, repeat: Data contamination and evaluation malpractices in closed-source {LLMs}},
|
| 383 |
+
author={Balloccu, Simone and others},
|
| 384 |
+
journal={Proceedings of EACL},
|
| 385 |
+
year={2024}
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
% ===== Splitting Strategies =====
|
| 389 |
+
|
| 390 |
+
@article{yang2019cold,
|
| 391 |
+
title={Analyzing learned molecular representations for property prediction},
|
| 392 |
+
author={Yang, Kevin and Swanson, Kyle and Jin, Wengong and Coley, Connor and Eiden, Philipp and Gao, Hua and Guzman-Perez, Angel and Hopper, Timothy and Kelley, Brian and Mathea, Miriam and others},
|
| 393 |
+
journal={Journal of Chemical Information and Modeling},
|
| 394 |
+
volume={59},
|
| 395 |
+
number={8},
|
| 396 |
+
pages={3370--3388},
|
| 397 |
+
year={2019},
|
| 398 |
+
publisher={ACS Publications}
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
@article{bemis1996murcko,
|
| 402 |
+
title={The properties of known drugs. 1. Molecular frameworks},
|
| 403 |
+
author={Bemis, Guy W and Murcko, Mark A},
|
| 404 |
+
journal={Journal of Medicinal Chemistry},
|
| 405 |
+
volume={39},
|
| 406 |
+
number={15},
|
| 407 |
+
pages={2887--2893},
|
| 408 |
+
year={1996}
|
| 409 |
+
}
|
| 410 |
+
|
| 411 |
+
% ===== PPI Models =====
|
| 412 |
+
|
| 413 |
+
@article{chen2019pipr,
|
| 414 |
+
title={Multifaceted protein--protein interaction prediction based on {Siamese} residual {RCNN}},
|
| 415 |
+
author={Chen, Muhao and Ju, Chelsea J-T and Zhou, Guangyu and Chen, Xuelu and Zhang, Tianle and Chang, Kai-Wei and Zaniolo, Carlo and Wang, Wei},
|
| 416 |
+
journal={Bioinformatics},
|
| 417 |
+
volume={35},
|
| 418 |
+
number={14},
|
| 419 |
+
pages={i305--i314},
|
| 420 |
+
year={2019},
|
| 421 |
+
publisher={Oxford University Press}
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
@article{zheng2020ddb,
|
| 425 |
+
title={Predicting drug--target interactions using drug--drug and target--target similarities},
|
| 426 |
+
author={Zheng, Yi and Wu, Zheng},
|
| 427 |
+
journal={BMC Bioinformatics},
|
| 428 |
+
year={2020}
|
| 429 |
+
}
|
| 430 |
+
|
| 431 |
+
% ===== LogAUC & Metrics =====
|
| 432 |
+
|
| 433 |
+
@article{mysinger2010logauc,
|
| 434 |
+
title={Rapid context-dependent ligand desolvation in molecular docking},
|
| 435 |
+
author={Mysinger, Michael M and Shoichet, Brian K},
|
| 436 |
+
journal={Journal of Chemical Information and Modeling},
|
| 437 |
+
volume={50},
|
| 438 |
+
number={9},
|
| 439 |
+
pages={1561--1573},
|
| 440 |
+
year={2010}
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
@article{truchon2007bedroc,
|
| 444 |
+
title={Evaluating virtual screening methods: good and bad metrics for the ``early recognition'' problem},
|
| 445 |
+
author={Truchon, Jean-Fran{\c{c}}ois and Bayly, Christopher I},
|
| 446 |
+
journal={Journal of Chemical Information and Modeling},
|
| 447 |
+
volume={47},
|
| 448 |
+
number={2},
|
| 449 |
+
pages={488--508},
|
| 450 |
+
year={2007}
|
| 451 |
+
}
|
| 452 |
+
|
| 453 |
+
% ===== General ML =====
|
| 454 |
+
|
| 455 |
+
@article{matthews1975mcc,
|
| 456 |
+
title={Comparison of the predicted and observed secondary structure of {T4} phage lysozyme},
|
| 457 |
+
author={Matthews, Brian W},
|
| 458 |
+
journal={Biochimica et Biophysica Acta (BBA)-Protein Structure},
|
| 459 |
+
volume={405},
|
| 460 |
+
number={2},
|
| 461 |
+
pages={442--451},
|
| 462 |
+
year={1975}
|
| 463 |
+
}
|
paper/scripts/generate_figures.py
ADDED
|
@@ -0,0 +1,343 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Generate all 3 paper figures as PDF files.

Figure 1: NegBioDB Architecture + Scale (architecture diagram + bar chart)
Figure 2: ML Cold-Split Catastrophe Heatmap (cross-domain AUROC heatmap)
Figure 3: L4 Opacity Gradient + Contamination (MCC bars + contamination panel)
"""

import matplotlib

# Select the non-interactive Agg backend *before* pyplot is imported so the
# script runs headless (CI, remote build machines) without a display server.
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.patches import FancyBboxPatch
import numpy as np
from pathlib import Path

# NeurIPS style settings: small serif fonts, 300 dpi, tight bounding boxes.
plt.rcParams.update({
    "font.family": "serif",
    "font.size": 8,
    "axes.labelsize": 9,
    "axes.titlesize": 9,
    "xtick.labelsize": 7,
    "ytick.labelsize": 7,
    "legend.fontsize": 7,
    "figure.dpi": 300,
    "savefig.dpi": 300,
    "savefig.bbox": "tight",
    "savefig.pad_inches": 0.02,
})

# Figures land next to the paper sources: <repo>/paper/figures.
OUTDIR = Path(__file__).resolve().parent.parent / "figures"
# parents=True makes this robust when intermediate directories do not yet
# exist (e.g. a fresh checkout where figures/ was gitignored).
OUTDIR.mkdir(parents=True, exist_ok=True)
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# ============================================================
|
| 37 |
+
# Figure 1: Architecture + Scale
|
| 38 |
+
# ============================================================
|
| 39 |
+
def fig1_overview():
    """Architecture diagram (Panel A) + stacked bar chart (Panel B)."""
    fig, (ax_arch, ax_bar) = plt.subplots(
        1, 2, figsize=(7, 2.8), gridspec_kw={"width_ratios": [1.3, 1]}
    )

    # ---------- Panel A: architecture diagram (drawn on a bare canvas) ----
    ax_arch.set_xlim(0, 10)
    ax_arch.set_ylim(0, 7)
    ax_arch.axis("off")
    ax_arch.set_title("(a) NegBioDB Architecture", fontsize=9, fontweight="bold", pad=4)

    # Shared abstraction layer shown as a single rounded box on top.
    common_box = FancyBboxPatch(
        (1, 5.5), 8, 1.2, boxstyle="round,pad=0.1",
        facecolor="#E8E8E8", edgecolor="black", linewidth=1.0
    )
    ax_arch.add_patch(common_box)
    ax_arch.text(5, 6.1, "Common Layer", ha="center", va="center",
                 fontsize=8, fontweight="bold")
    ax_arch.text(5, 5.7, "Hypothesis | Evidence | Outcome | Confidence Tier",
                 ha="center", va="center", fontsize=6, style="italic")

    # One rounded box per domain: (name, color, left-edge x, source labels).
    domain_specs = [
        ("DTI", "#4C72B0", 1.0, ["ChEMBL (30.5M)", "PubChem", "BindingDB", "DAVIS"]),
        ("CT", "#DD8452", 4.0, ["AACT (133K)", "Open Targets", "CTO", "Shi & Du"]),
        ("PPI", "#55A868", 7.0, ["IntAct (2.2M)", "HuRI", "hu.MAP", "STRING"]),
    ]
    for name, color, left, sources in domain_specs:
        center = left + 1.0
        domain_box = FancyBboxPatch(
            (left, 1.0), 2.0, 3.8, boxstyle="round,pad=0.1",
            facecolor=color, edgecolor="black", linewidth=0.8, alpha=0.25
        )
        ax_arch.add_patch(domain_box)
        ax_arch.text(center, 4.4, name, ha="center", va="center",
                     fontsize=8, fontweight="bold", color=color)
        # Stack the data-source names inside the box, top to bottom.
        for row, src in enumerate(sources):
            ax_arch.text(center, 3.6 - row * 0.65, src,
                         ha="center", va="center", fontsize=5.5)
        # Arrow connecting the common layer down into this domain.
        ax_arch.annotate(
            "", xy=(center, 4.8), xytext=(center, 5.5),
            arrowprops=dict(arrowstyle="->", color="black", lw=0.8)
        )

    # ---------- Panel B: stacked bar chart of scale per tier --------------
    ax_bar.set_title("(b) Scale by Confidence Tier", fontsize=9, fontweight="bold", pad=4)

    # Tier counts (verified from database queries).
    tier_colors = {"Gold": "#FFD700", "Silver": "#C0C0C0", "Bronze": "#CD7F32", "Copper": "#B87333"}
    domains_data = {
        "DTI": {"Gold": 818611, "Silver": 774875, "Bronze": 28866097, "Copper": 0},
        "PPI": {"Gold": 500069, "Silver": 1229601, "Bronze": 500000, "Copper": 0},
        "CT": {"Gold": 23570, "Silver": 28505, "Bronze": 60223, "Copper": 20627},
    }

    labels = ["DTI", "PPI", "CT"]
    x_pos = np.arange(3)
    stack_base = np.zeros(3)

    # One bar segment per tier, stacked bottom-up in insertion order.
    for tier, color in tier_colors.items():
        segment = [domains_data[d][tier] for d in labels]
        ax_bar.bar(x_pos, segment, 0.6, bottom=stack_base, color=color,
                   label=tier, edgecolor="white", linewidth=0.5)
        stack_base += segment

    ax_bar.set_yscale("log")
    ax_bar.set_ylabel("Negative Results")
    ax_bar.set_xticks(x_pos)
    ax_bar.set_xticklabels(labels)
    ax_bar.set_ylim(1e4, 5e7)
    ax_bar.legend(loc="upper right", framealpha=0.9, ncol=2)
    ax_bar.spines["top"].set_visible(False)
    ax_bar.spines["right"].set_visible(False)

    # Human-readable totals annotated above each stack.
    totals = [30.5e6, 2.23e6, 132925]
    for idx, total in enumerate(totals):
        text = f"{total/1e6:.1f}M" if total >= 1e6 else f"{total/1e3:.0f}K"
        ax_bar.text(idx, stack_base[idx] * 1.15, text, ha="center",
                    va="bottom", fontsize=7, fontweight="bold")

    plt.tight_layout()
    fig.savefig(OUTDIR / "fig1_overview.pdf")
    plt.close(fig)
    print(" -> fig1_overview.pdf")
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# ============================================================
|
| 139 |
+
# Figure 2: ML Cold-Split Catastrophe Heatmap
|
| 140 |
+
# ============================================================
|
| 141 |
+
def fig2_ml_heatmap():
    """Cross-domain ML AUROC heatmap showing the cold-split catastrophe.

    Rows are (domain, model) pairs; columns are split strategies. NaN cells
    (configurations that were not run) render as light gray "N/A".
    """
    row_labels = [
        "DTI / DeepDTA",
        "DTI / GraphDTA",
        "DTI / DrugBAN",
        "CT / XGBoost",
        "CT / MLP",
        "CT / GNN",
        "PPI / SiameseCNN",
        "PPI / PIPR",
        "PPI / MLPFeatures",
    ]

    # Cold-X = cold_compound (DTI), cold_drug (CT), cold_protein (PPI)
    # Cold-Y = cold_target (DTI), cold_condition (CT), cold_both (PPI)
    col_labels = ["Random", "Cold-X", "Cold-Y", "DDB"]

    # AUROC values: DTI seed 42; CT seed 42 (3-seed mean where available);
    # PPI 3-seed average. NaN = configuration not applicable.
    data = np.array([
        # DTI (seed 42)
        [0.997, 0.996, 0.887, 0.997],  # DeepDTA
        [0.997, 0.997, 0.863, 0.997],  # GraphDTA
        [0.997, 0.997, 0.760, 0.997],  # DrugBAN
        # CT
        [1.000, 1.000, 1.000, np.nan],  # XGBoost (no DDB)
        [1.000, 1.000, 1.000, np.nan],  # MLP
        [1.000, 1.000, 1.000, np.nan],  # GNN
        # PPI (3-seed average)
        [0.963, 0.873, 0.585, 0.962],  # SiameseCNN
        [0.964, 0.859, 0.409, 0.964],  # PIPR
        [0.962, 0.931, 0.950, 0.961],  # MLPFeatures
    ])

    fig, ax = plt.subplots(figsize=(4.5, 3.8))

    # Mask NaNs so imshow paints them with the colormap's "bad" color.
    masked = np.ma.masked_invalid(data)

    # Custom diverging-style map: red = catastrophe, green = good.
    from matplotlib.colors import LinearSegmentedColormap
    colors_list = ["#d62728", "#ff7f0e", "#ffdd57", "#98df8a", "#2ca02c"]
    cmap = LinearSegmentedColormap.from_list("catastrophe", colors_list, N=256)
    cmap.set_bad(color="#f0f0f0")

    im = ax.imshow(masked, cmap=cmap, aspect="auto", vmin=0.3, vmax=1.0)

    # Annotate every cell with its value (or "N/A"); emphasize failures.
    for i in range(len(row_labels)):
        for j in range(len(col_labels)):
            val = data[i, j]
            if np.isnan(val):
                ax.text(j, i, "N/A", ha="center", va="center",
                        fontsize=6.5, color="gray")
            else:
                color = "white" if val < 0.6 else "black"
                weight = "bold" if val < 0.7 else "normal"
                ax.text(j, i, f"{val:.3f}", ha="center", va="center",
                        fontsize=6.5, color=color, fontweight=weight)

    # Horizontal rules separating the three domains (after rows 2 and 5).
    ax.axhline(2.5, color="black", linewidth=1.5)
    ax.axhline(5.5, color="black", linewidth=1.5)

    ax.set_xticks(range(len(col_labels)))
    ax.set_xticklabels(col_labels)
    ax.set_yticks(range(len(row_labels)))
    ax.set_yticklabels(row_labels, fontsize=7)
    ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)

    # Domain labels just to the right of the last column, in DATA coords.
    # BUGFIX: previously drawn with transform=ax.get_yaxis_transform(), which
    # interprets x in axes-fraction units -- x=3.7 placed the labels about
    # 3.7 axes-widths off-canvas (and savefig bbox="tight" inflated the PDF).
    # Plain data coordinates put them 0.2 units past the last column center.
    for y, label in [(1, "DTI"), (4, "CT"), (7, "PPI")]:
        ax.text(len(col_labels) - 0.3, y, label, ha="left", va="center",
                fontsize=8, fontweight="bold", color="gray")

    cbar = fig.colorbar(im, ax=ax, fraction=0.03, pad=0.08)
    cbar.set_label("AUROC", fontsize=8)

    ax.set_title("ML Cold-Split Performance (AUROC)", fontsize=9,
                 fontweight="bold", pad=12)

    plt.tight_layout()
    fig.savefig(OUTDIR / "fig2_ml_heatmap.pdf")
    plt.close(fig)
    print(" -> fig2_ml_heatmap.pdf")
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
# ============================================================
|
| 236 |
+
# Figure 3: L4 Opacity Gradient + Contamination
|
| 237 |
+
# ============================================================
|
| 238 |
+
def fig3_l4_gradient():
    """Panel A: L4 MCC bars across domains. Panel B: PPI contamination."""
    fig, (ax_mcc, ax_contam) = plt.subplots(
        1, 2, figsize=(7, 2.5), gridspec_kw={"width_ratios": [1.6, 1]}
    )

    # ---------- Panel A: L4 MCC across domains ----------------------------
    # Best config (zero-shot or 3-shot) per model; Haiku has no DTI run.
    model_names = ["Gemini", "GPT-4o", "Llama", "Qwen", "Haiku"]
    palette = ["#4C72B0", "#DD8452", "#55A868", "#C44E52", "#8172B3"]

    # MCC per model, one list per domain (order matches model_names).
    # DTI: Gemini 3s, GPT 3s, Llama 3s, Qwen 3s, Haiku N/A
    dti_mcc = [-0.102, 0.047, 0.184, 0.113, np.nan]
    # PPI: Gemini 3s, GPT 0s, Llama 0s, Qwen 3s, Haiku 3s
    ppi_mcc = [0.382, 0.430, 0.441, 0.369, 0.390]
    # CT: Gemini 3s, GPT 0s, Llama 3s, Qwen 0s, Haiku 0s
    ct_mcc = [0.563, 0.491, 0.504, 0.519, 0.514]

    domain_x = np.arange(3)  # DTI, PPI, CT groups
    n_models = len(model_names)
    bar_w = 0.15
    # Symmetric offsets so the model bars center on each domain tick.
    shifts = np.arange(n_models) - (n_models - 1) / 2

    for m, (name, color) in enumerate(zip(model_names, palette)):
        heights = [dti_mcc[m], ppi_mcc[m], ct_mcc[m]]
        centers = domain_x + shifts[m] * bar_w
        ax_mcc.bar(centers, heights, bar_w * 0.9, color=color, label=name,
                   edgecolor="white", linewidth=0.3)
        # NaN bars draw nothing; mark those slots explicitly.
        for d, h in enumerate(heights):
            if np.isnan(h):
                ax_mcc.text(centers[d], 0.02, "N/A", ha="center", va="bottom",
                            fontsize=5, color="gray", rotation=90)

    ax_mcc.axhline(0, color="black", linewidth=0.5, linestyle="--", alpha=0.5)
    ax_mcc.set_xticks(domain_x)
    ax_mcc.set_xticklabels(["DTI\n(opaque)", "PPI\n(crawlable)", "CT\n(public)"])
    ax_mcc.set_ylabel("MCC")
    ax_mcc.set_ylim(-0.15, 0.65)
    ax_mcc.set_title("(a) L4 Discrimination: The Opacity Gradient",
                     fontsize=9, fontweight="bold", pad=4)
    ax_mcc.legend(loc="upper left", ncol=3, framealpha=0.9, fontsize=6)
    ax_mcc.spines["top"].set_visible(False)
    ax_mcc.spines["right"].set_visible(False)

    # Faint curved arrow emphasizing the rising trend across domains.
    ax_mcc.annotate(
        "", xy=(2.35, 0.55), xytext=(-0.15, 0.0),
        arrowprops=dict(arrowstyle="->", color="red", lw=1.5,
                        connectionstyle="arc3,rad=0.15", alpha=0.4)
    )

    # ---------- Panel B: PPI temporal contamination -----------------------
    # Accuracy on pre-2015 vs post-2020 pairs (best 3-shot run per model).
    contam_models = ["Gemini", "GPT-4o", "Llama", "Qwen", "Haiku"]
    pre_2015 = [0.765, 0.569, 0.745, 0.588, 0.618]
    post_2020 = [0.184, 0.112, 0.133, 0.112, 0.051]

    x_c = np.arange(len(contam_models))
    w = 0.35

    ax_contam.bar(x_c - w/2, pre_2015, w, color="#4C72B0", label="Pre-2015",
                  edgecolor="white", linewidth=0.3)
    ax_contam.bar(x_c + w/2, post_2020, w, color="#DD8452", label="Post-2020",
                  edgecolor="white", linewidth=0.3)

    # Per-model gap (Delta) annotated midway between the two bar heights.
    for i in range(len(contam_models)):
        gap = pre_2015[i] - post_2020[i]
        mid = (pre_2015[i] + post_2020[i]) / 2
        ax_contam.annotate(
            f"\u0394={gap:.2f}", xy=(i, mid), fontsize=5.5,
            ha="center", va="center", color="red", fontweight="bold",
            bbox=dict(boxstyle="round,pad=0.15", facecolor="white",
                      edgecolor="none", alpha=0.8)
        )

    ax_contam.axhline(0.5, color="gray", linewidth=0.5, linestyle=":", alpha=0.5)
    ax_contam.set_xticks(x_c)
    ax_contam.set_xticklabels(contam_models, fontsize=6.5)
    ax_contam.set_ylabel("Accuracy")
    ax_contam.set_ylim(0, 0.9)
    ax_contam.set_title("(b) PPI Contamination (L4)",
                        fontsize=9, fontweight="bold", pad=4)
    ax_contam.legend(loc="upper right", framealpha=0.9, fontsize=6)
    ax_contam.spines["top"].set_visible(False)
    ax_contam.spines["right"].set_visible(False)

    plt.tight_layout()
    fig.savefig(OUTDIR / "fig3_l4_gradient.pdf")
    plt.close(fig)
    print(" -> fig3_l4_gradient.pdf")
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
# ============================================================
|
| 336 |
+
# Main
|
| 337 |
+
# ============================================================
|
| 338 |
+
if __name__ == "__main__":
    print("Generating paper figures...")
    # Render each figure in order; every function saves its own PDF.
    for make_figure in (fig1_overview, fig2_ml_heatmap, fig3_l4_gradient):
        make_figure()
    print("Done. Figures saved to:", OUTDIR)
|
paper/sections/abstract.tex
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\begin{abstract}
|
| 2 |
+
Publication bias ensures that negative experimental results---inactive compounds, failed clinical trials, confirmed non-interactions---remain largely unreported, creating systematic blind spots in biomedical AI. Machine learning benchmarks treat untested pairs as negatives, while large language models hallucinate evidence for experiments never conducted. We introduce \textbf{NegBioDB}, the first multi-domain database of experimentally confirmed negative results, aggregating 32.9 million entries from 12 data sources across three domains: drug--target interaction (DTI; 30.5M), clinical trial failure (CT; 133K), and protein--protein interaction (PPI; 2.2M), organized by four confidence tiers. We pair this resource with \textbf{NegBioBench}, a dual ML+LLM benchmark comprising 421 experiments (180 ML, 241 LLM) across four evaluation levels of increasing cognitive demand. Our experiments reveal three findings: (1)~control negatives inflate DTI model performance by +0.112 LogAUC, with model-dependent effects in PPI and CT; (2)~cold-entity splits expose universal generalization failures, including a sequence-based PPI model dropping to AUROC~=~0.41 (below random); and (3)~the \emph{opacity gradient}---LLM discrimination between tested-negative and untested pairs correlates with data accessibility in training corpora (DTI MCC~0.18 $\to$ PPI~0.44 $\to$ CT~0.56), not biological reasoning capability. Temporal contamination analysis confirms that PPI performance reflects memorization rather than understanding. All five evaluated LLMs exhibit 100\% evidence hallucination rates across all domains. NegBioDB and NegBioBench are released under CC BY-SA 4.0 with Croissant metadata at \url{https://github.com/jang1563/NegBioDB}.
|
| 3 |
+
\end{abstract}
|
paper/sections/benchmark.tex
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{NegBioBench: Evaluation Framework}
|
| 2 |
+
\label{sec:benchmark}
|
| 3 |
+
|
| 4 |
+
NegBioBench is a dual ML+LLM benchmark designed to evaluate both predictive models and language models on negative biological evidence. We describe the two evaluation tracks and our methodology for measuring genuine understanding versus memorization.
|
| 5 |
+
|
| 6 |
+
\subsection{ML Track}
|
| 7 |
+
|
| 8 |
+
The ML track evaluates whether predictive models can distinguish experimentally confirmed negatives from positives, and whether standard evaluation practices inflate reported performance.
|
| 9 |
+
|
| 10 |
+
\textbf{Tasks.} We define two task types: \emph{M1} (binary classification: negative vs.\ positive) across all three domains, and \emph{M2} (7-way failure category prediction) for CT only. Positive examples come from established sources: DAVIS actives for DTI, CTO successful trials~\citep{siah2021cto} for CT, and HuRI positive interactions~\citep{luck2020huri} for PPI.
|
| 11 |
+
|
| 12 |
+
\textbf{Splitting strategies.} We implement domain-appropriate cold splits to test generalization: cold\_drug and cold\_target (DTI), cold\_drug and cold\_condition (CT), cold\_protein and cold\_both via METIS graph partitioning~\citep{karypis1998metis} (PPI), plus random, temporal, scaffold~\citep{bemis1996murcko}, and degree-balanced (DDB)~\citep{zheng2020ddb} splits where applicable.
|
| 13 |
+
|
| 14 |
+
\textbf{Control negatives.} To measure the effect of negative source on model performance (\emph{Experiment~1}), we train identical models on NegBioDB negatives versus two control sets: uniform random pairs and degree-matched random pairs. This directly tests whether curated negatives carry different signal than assumed negatives.
|
| 15 |
+
|
| 16 |
+
\textbf{Models.} Three architectures per domain: DeepDTA~\citep{ozturk2018deepdta}, GraphDTA~\citep{nguyen2021graphdta}, and DrugBAN~\citep{bai2023drugban} for DTI; XGBoost~\citep{chen2016xgboost}, MLP, and GNN for CT; SiameseCNN, PIPR~\citep{chen2019pipr}, and MLPFeatures for PPI. Metrics include AUROC, LogAUC$_{[0.001,0.1]}$~\citep{mysinger2010logauc}, AUPRC, and MCC~\citep{matthews1975mcc}.
|
| 17 |
+
|
| 18 |
+
\subsection{LLM Track}
|
| 19 |
+
|
| 20 |
+
The LLM track evaluates language models across four levels of increasing cognitive demand:
|
| 21 |
+
|
| 22 |
+
\textbf{L1 (Multiple Choice).} Classification of negative evidence into domain-specific categories (4-way for DTI/PPI, 5-way for CT). Tests whether LLMs can recognize evidence types from textual descriptions.
|
| 23 |
+
|
| 24 |
+
\textbf{L2 (Extraction).} Structured JSON extraction of key fields from evidence text (compound/target identifiers, assay types, p-values). Tests whether LLMs can parse scientific evidence into machine-readable formats.
|
| 25 |
+
|
| 26 |
+
\textbf{L3 (Reasoning).} Open-ended scientific reasoning about why a negative result was observed and its implications. Evaluated by an LLM-as-judge on four dimensions: accuracy, completeness, reasoning quality, and specificity.
|
| 27 |
+
|
| 28 |
+
\textbf{L4 (Discrimination).} Binary classification of whether a given entity pair has been \emph{experimentally tested and found inactive} versus \emph{never tested}. This is the critical level: it tests whether LLMs possess genuine understanding of negative results or merely recall information from training data.
|
| 29 |
+
|
| 30 |
+
\textbf{Models and configurations.} Five models span the capability spectrum: Llama-3.3-70B~\citep{dubey2024llama3}, Qwen2.5-32B~\citep{yang2024qwen2}, GPT-4o-mini~\citep{openai2024gpt4o}, Gemini-2.5-Flash~\citep{google2025gemini}, and Claude Haiku-4.5~\citep{anthropic2025claude}. Each is evaluated in zero-shot and three few-shot configurations (different random example sets), yielding 4 configurations per model--level pair.
|
| 31 |
+
|
| 32 |
+
\subsection{Evaluation Methodology}
|
| 33 |
+
|
| 34 |
+
NegBioBench makes three methodological contributions relevant to the evaluation landscape:
|
| 35 |
+
|
| 36 |
+
\textbf{L4 as a contamination probe.} L4 discrimination performance reveals whether models have memorized negative results from training data. We incorporate temporal stratification (pre-2015 vs.\ post-2020 publication dates) to detect contamination: a performance gap exceeding 0.15 indicates likely memorization rather than reasoning~\citep{sainz2024contamination}.
|
| 37 |
+
|
| 38 |
+
\textbf{Cross-domain comparison.} By applying identical evaluation levels across three domains with different data accessibility profiles, we can isolate the effect of training data composition on LLM performance---independent of biological task difficulty.
|
| 39 |
+
|
| 40 |
+
\textbf{Anti-contamination design.} L1--L3 use paraphrased evidence text to reduce verbatim memorization advantage. L4 temporal holdouts ensure post-training-cutoff examples are included. All hallucinated evidence citations are tracked to measure confabulation rates.
|
paper/sections/database.tex
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{NegBioDB: A Database of Negative Results}
|
| 2 |
+
\label{sec:database}
|
| 3 |
+
|
| 4 |
+
NegBioDB is a multi-domain database of experimentally confirmed negative results in biomedicine, aggregating 32.9M entries from 12 data sources across three domains. We first describe the common design principles, then detail each domain.
|
| 5 |
+
|
| 6 |
+
\subsection{Design Principles}
|
| 7 |
+
|
| 8 |
+
All three domains share a common abstraction layer: each record encodes a \emph{hypothesis} (e.g., ``compound X inhibits target Y''), \emph{experimental evidence} (assay type, method, publication), an \emph{outcome} (inactive, failed, non-interacting), and a \emph{confidence tier} reflecting evidence quality. The four-tier system is:
|
| 9 |
+
\textbf{Gold}---systematic screens or multiple independent confirmations (e.g., DAVIS kinase panel, HuRI Y2H screen);
|
| 10 |
+
\textbf{Silver}---single quantitative measurement or statistical evidence (e.g., $p>0.05$ from clinical trial, ML-derived from co-purification data);
|
| 11 |
+
\textbf{Bronze}---computationally derived or NLP-detected (e.g., STRING zero-score pairs, NLP-classified trial terminations);
|
| 12 |
+
\textbf{Copper}---label-only annotations without detailed evidence.
|
| 13 |
+
Table~\ref{tab:overview} summarizes the database scope.
|
| 14 |
+
|
| 15 |
+
\begin{table}[t]
|
| 16 |
+
\centering
|
| 17 |
+
\caption{NegBioDB database overview across three biomedical domains.}
|
| 18 |
+
\label{tab:overview}
|
| 19 |
+
\small
|
| 20 |
+
\begin{tabular}{@{}lrrrr@{}}
|
| 21 |
+
\toprule
|
| 22 |
+
& \textbf{DTI} & \textbf{CT} & \textbf{PPI} & \textbf{Total} \\
|
| 23 |
+
\midrule
|
| 24 |
+
Negative results & 30.5M & 132,925 & 2.23M & 32.9M \\
|
| 25 |
+
Key entities & 919K / 3.7K & 177K / 56K & 18.4K & --- \\
|
| 26 |
+
& \scriptsize{(cpd / tgt)} & \scriptsize{(interv / cond)} & \scriptsize{(proteins)} & \\
|
| 27 |
+
Data sources & 4 & 4 & 4 & 12 \\
|
| 28 |
+
Confidence tiers & 3 & 4 & 3 & 4 \\
|
| 29 |
+
DB size & 13.2 GB & 0.5 GB & 0.8 GB & 14.6 GB \\
|
| 30 |
+
\midrule
|
| 31 |
+
ML benchmark runs & 18 & 108 & 54 & 180 \\
|
| 32 |
+
LLM benchmark runs & 81 & 80 & 80 & 241 \\
|
| 33 |
+
\bottomrule
|
| 34 |
+
\end{tabular}
|
| 35 |
+
\end{table}
|
| 36 |
+
|
| 37 |
+
\subsection{Three Domains}
|
| 38 |
+
|
| 39 |
+
\textbf{Drug--Target Interaction (DTI).}
|
| 40 |
+
We aggregate inactive compound--target pairs from four sources: ChEMBL~\citep{gaulton2017chembl} bioactivity records with pChEMBL $<5$ (i.e., IC$_{50}$ $>10\,\mu$M); PubChem~\citep{kim2023pubchem} confirmatory inactives from dose-response screens; BindingDB~\citep{gilson2016bindingdb} entries with $K_d > 10\,\mu$M; and the full DAVIS kinase selectivity matrix~\citep{davis2011comprehensive}, where untested pairs are excluded. This yields 30.5M negative results across 919K compounds and 3,694 targets---three orders of magnitude larger than standard DTI benchmarks that rely on assumed negatives~\citep{huang2021therapeutics,mysinger2012dude}.
|
| 41 |
+
|
| 42 |
+
\textbf{Clinical Trial Failure (CT).}
|
| 43 |
+
We process 216,987 trials from the AACT database~\citep{tasneem2012aact} through a three-tier failure detection pipeline: (i)~NLP classification of termination reasons into 7 failure categories (bronze tier); (ii)~statistical evidence extraction from outcome measures where $p>0.05$ indicates non-superiority (silver/gold tiers); and (iii)~integration of the Clinical Trial Outcome dataset~\citep{siah2021cto} for label-only records (copper tier). Drug names are resolved to ChEMBL identifiers through a four-step cascade (exact match, PubChem API, fuzzy matching with Jaro--Winkler $>0.90$, manual curation), achieving 20.6\% resolution with SMILES structures. The pipeline identifies 132,925 failure results annotated with 7 failure categories (safety, efficacy, enrollment, strategic, regulatory, design, and pharmacokinetic) plus a residual \emph{other} category.
|
| 44 |
+
|
| 45 |
+
\textbf{Protein--Protein Interaction (PPI).}
|
| 46 |
+
We compile confirmed non-interactions from four sources spanning different evidence types: IntAct~\citep{orchard2014intact} curated non-interactions from co-immunoprecipitation and two-hybrid assays (779 gold/silver pairs); HuRI~\citep{luck2020huri} systematic yeast two-hybrid screen negatives sampled from 39.9M candidates via reservoir sampling (500K gold pairs); hu.MAP~\citep{drew2021humap} ML-derived non-interactions from co-purification mass spectrometry (1.23M silver pairs); and STRING~\citep{szklarczyk2023string} zero-score pairs between well-studied proteins (500K bronze pairs). After cross-source aggregation, NegBioDB contains 2.23M unique negative PPI pairs across 18,412 human proteins with UniProt-validated identifiers and sequences (99.6\% coverage).
|
| 47 |
+
|
| 48 |
+
\begin{figure}[t]
|
| 49 |
+
\centering
|
| 50 |
+
\includegraphics[width=\textwidth]{figures/fig1_overview.pdf}
|
| 51 |
+
\caption{NegBioDB overview. \textbf{(a)} Architecture showing three domains unified by a common abstraction layer with four confidence tiers. Each domain integrates four data sources. \textbf{(b)} Scale of negative results by domain and confidence tier (log scale). DTI dominates in volume (30.5M), while CT and PPI contribute qualitatively distinct evidence types.}
|
| 52 |
+
\label{fig:overview}
|
| 53 |
+
\end{figure}
|
paper/sections/discussion.tex
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{Discussion and Conclusion}
|
| 2 |
+
\label{sec:discussion}
|
| 3 |
+
|
| 4 |
+
\textbf{The opacity gradient and its implications.}
|
| 5 |
+
The opacity gradient implies that LLM benchmarks must account for data accessibility in training corpora, not just task difficulty. The L4 progression---DTI (MCC~$\leq$~0.18) $\to$ PPI (0.44) $\to$ CT (0.56)---does not reflect increasing biological reasoning capability. Rather, it mirrors the degree to which each domain's data appears in web crawls: ChEMBL bioactivity tables are locked behind database queries (opaque), IntAct/STRING protein interaction data is publicly crawlable (memorizable), and ClinicalTrials.gov records are extensively discussed in news, regulatory filings, and medical literature (public). Models do not ``understand'' negative results---they recall them. This distinction is critical for responsible LLM deployment in drug discovery and clinical trial design.
|
| 6 |
+
|
| 7 |
+
\textbf{Curated negatives as a feature.}
|
| 8 |
+
That NegBioDB negatives are trivially separable from positives by ML models (AUROC~=~1.0 on random splits) is a feature, not a bug: it confirms that experimentally confirmed negatives encode genuine biological signal absent from random pairs. The value of NegBioDB for ML emerges in cold and temporal splits, where models must generalize to unseen entities---and where we observe catastrophic failures (PIPR cold\_both AUROC~=~0.409) that random negatives would not reveal.
|
| 9 |
+
|
| 10 |
+
\textbf{Universal evidence hallucination.}
|
| 11 |
+
The 100\% hallucination rate for evidence citations across all 241 LLM runs is a safety concern. Even when models correctly classify negative results, they fabricate supporting evidence (PMIDs, DOIs, author names). This confabulation pattern persists across all five models, all four levels, and all three domains, suggesting it is a fundamental limitation of current LLMs rather than a model-specific issue.
|
| 12 |
+
|
| 13 |
+
\textbf{Limitations.}
|
| 14 |
+
DTI ML baselines use a single seed (CT/PPI use 3 seeds). CT drug resolution covers only 20.6\% of interventions due to non-standard drug naming in trial records. PPI L1/L2 tasks are trivially solvable with few-shot prompting, limiting their discriminative value. The L3 reasoning evaluation suffers from a judge ceiling effect in CT (Appendix~D). Contamination analysis is conclusive only for PPI; DTI L4 performance is too low to detect temporal patterns. The database was developed by a single author, mitigated by 800+ automated tests and a comprehensive reproducibility pipeline.
|
| 15 |
+
|
| 16 |
+
\textbf{Conclusion.}
|
| 17 |
+
NegBioDB provides the first multi-domain resource for experimentally confirmed negative results in biomedicine (32.9M entries, CC BY-SA 4.0, Croissant metadata). NegBioBench reveals that curated negatives carry systematically different signal than controls, cold splits expose universal generalization failures, and LLMs are fundamentally failure-blind---unable to distinguish tested from untested pairs without training data memorization. We release all databases, benchmarks, and code to support future work on negative result understanding, including community contribution tools, additional domains (e.g., gene function), and a public leaderboard.
|
paper/sections/experiments.tex
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{Experiments and Results}
|
| 2 |
+
\label{sec:experiments}
|
| 3 |
+
|
| 4 |
+
We organize 421 experiments (180 ML + 241 LLM) into four cross-domain findings. DTI uses a single seed; CT and PPI use 3 seeds (we report mean $\pm$ std). Full per-run tables appear in Appendices~B--C.
|
| 5 |
+
|
| 6 |
+
\subsection{Do Curated Negatives Differ from Controls?}
|
| 7 |
+
\label{sec:exp-inflation}
|
| 8 |
+
|
| 9 |
+
We train identical models on NegBioDB negatives versus uniform random and degree-matched control negatives, holding all other variables constant (Table~\ref{tab:ml_results}).
|
| 10 |
+
|
| 11 |
+
\textbf{DTI.} Degree-matched negatives inflate LogAUC by +0.112 on average across all three models (e.g., GraphDTA: 0.843 $\to$ 0.967). This confirms that assumed-negative benchmarks systematically overestimate model performance. Uniform random controls show negligible average inflation ($<$+0.01 LogAUC), indicating that degree matching specifically creates an especially easy discrimination task.
|
| 12 |
+
|
| 13 |
+
\textbf{CT.} The pattern reverses in direction: NegBioDB negatives (clinical failures) are trivially separable from CTO successes (AUROC~=~1.0), while control negatives are harder. Degree-matched controls reduce AUROC by 0.16--0.24 across models (e.g., GNN: 1.0 $\to$ 0.76). This is expected---genuine clinical failures carry rich pharmacological features absent from random drug--condition pairs.
|
| 14 |
+
|
| 15 |
+
\textbf{PPI.} The effect is \emph{model-dependent}---a novel finding. Sequence-based models (SiameseCNN, PIPR) show +0.03--0.05 LogAUC inflation with control negatives, consistent with DTI. However, MLPFeatures shows \emph{reversed} inflation: NegBioDB negatives are harder than controls ($-$0.03 to $-$0.11 LogAUC), because hand-crafted features (protein degree, subcellular localization) capture the same structural signal that curated negatives encode.
|
| 16 |
+
|
| 17 |
+
\textbf{Cross-domain insight.} Curated negatives carry systematically different signal than controls in all three domains. The \emph{direction} of inflation depends on whether the model architecture captures the same features that distinguish curated from random negatives.
|
| 18 |
+
|
| 19 |
+
\begin{table}[t]
|
| 20 |
+
\centering
|
| 21 |
+
\caption{ML results summary: best model per domain across key splits and negative source inflation. Cold-X/Y denote the domain-specific cold entity splits (drug/target for DTI, drug/condition for CT, protein/both for PPI). $\Delta$ reports the change in primary metric when switching from NegBioDB to degree-matched negatives.}
|
| 22 |
+
\label{tab:ml_results}
|
| 23 |
+
\small
|
| 24 |
+
\begin{tabular}{@{}llccccl@{}}
|
| 25 |
+
\toprule
|
| 26 |
+
\textbf{Domain} & \textbf{Model} & \textbf{Random} & \textbf{Cold-X} & \textbf{Cold-Y} & \textbf{DDB} & \textbf{Neg.\ Inflation} \\
|
| 27 |
+
\midrule
|
| 28 |
+
DTI & GraphDTA & .997 & .997 & .863 & .997 & +0.124 LogAUC \\
|
| 29 |
+
DTI & DrugBAN & .997 & .997 & .760 & .997 & +0.125 LogAUC \\
|
| 30 |
+
\midrule
|
| 31 |
+
CT-M1 & XGBoost & 1.00 & 1.00 & 1.00 & --- & $-$0.16 AUROC \\
|
| 32 |
+
CT-M2 & XGBoost & .51{\scriptsize\,mF1} & .41 & .34 & --- & --- \\
|
| 33 |
+
\midrule
|
| 34 |
+
PPI\,(seq) & PIPR & .964 & .859 & \textbf{.409} & .964 & +0.03--0.05 LogAUC \\
|
| 35 |
+
PPI\,(feat) & MLPFeat & .962 & .931 & \textbf{.950} & .961 & \textbf{$-$0.03 to $-$0.11 LogAUC} \\
|
| 36 |
+
\bottomrule
|
| 37 |
+
\end{tabular}
|
| 38 |
+
\end{table}
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
\subsection{Does Cold Splitting Expose Generalization Failures?}
|
| 42 |
+
\label{sec:exp-cold}
|
| 43 |
+
|
| 44 |
+
Cold splits remove all instances of a specific entity from the training set, testing whether models generalize to unseen drugs, targets, proteins, or conditions (Figure~\ref{fig:heatmap}).
|
| 45 |
+
|
| 46 |
+
\textbf{DTI.} Cold-target splitting is catastrophic: LogAUC collapses from 0.83 to 0.15--0.33 across all three models, while AUROC misleadingly remains 0.76--0.89. DrugBAN suffers most severely (LogAUC~=~0.151, AUROC~=~0.760). Cold-compound splitting has minimal effect, indicating models memorize target-specific patterns.
|
| 47 |
+
|
| 48 |
+
\textbf{CT.} M1 binary classification remains trivially solvable even under cold splits (AUROC~=~1.0 for cold\_drug and cold\_condition). However, M2 7-way category prediction reveals severe failures: scaffold and temporal splits collapse macro-F1 to 0.19 across all models, approaching the random baseline of $1/7 \approx 0.14$.
|
| 49 |
+
|
| 50 |
+
\textbf{PPI.} Cold-both splitting (unseen proteins on both sides, via METIS partitioning) produces a model-dependent catastrophe. PIPR drops to AUROC~=~0.409---\emph{below random}---while MLPFeatures remains robust at 0.950. This 0.54 AUROC gap between sequence-based and feature-based architectures is the largest we observe. SiameseCNN falls in between (0.585).
|
| 51 |
+
|
| 52 |
+
\textbf{Cross-domain pattern.} Cold-split catastrophe is \emph{universal} across domains but \emph{model-dependent} within each domain. Sequence-based and attention models memorize training entities; feature-based models generalize. Notably, DDB $\approx$ random in all three domains, suggesting degree-balanced splitting does not add meaningful difficulty beyond random assignment.
|
| 53 |
+
|
| 54 |
+
\begin{figure}[t]
|
| 55 |
+
\centering
|
| 56 |
+
\includegraphics[width=0.65\textwidth]{figures/fig2_ml_heatmap.pdf}
|
| 57 |
+
\caption{ML cold-split performance across domains (AUROC). Red cells indicate catastrophic failure ($<0.7$). CT M1 is trivially separable across all splits; cold-both PPI reveals a 0.54 gap between sequence (PIPR: 0.409) and feature-based (MLPFeatures: 0.950) architectures. N/A: split not applicable.}
|
| 58 |
+
\label{fig:heatmap}
|
| 59 |
+
\end{figure}
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
\subsection{Can LLMs Reason About Negative Biological Evidence?}
|
| 63 |
+
\label{sec:exp-llm}
|
| 64 |
+
|
| 65 |
+
We evaluate five LLMs across four evaluation levels in each domain (Table~\ref{tab:llm_results}).
|
| 66 |
+
|
| 67 |
+
\textbf{L1 (Multiple Choice).} Performance varies dramatically by domain. PPI is trivially solvable: all models achieve $\geq$0.999 accuracy with 3-shot prompting (evidence text makes category self-evident). CT is the hardest domain: the best model (Gemini) reaches only 0.667 accuracy on 5-way classification, well below the 0.80+ levels seen in DTI and PPI. DTI falls between (0.65--1.0 with 3-shot). Difficulty correlates with label discriminability in evidence text.
|
| 68 |
+
|
| 69 |
+
\textbf{L2 (Extraction).} PPI extraction is near-perfect (entity F1 $\geq$ 0.95, count accuracy 1.0) because protein names are unambiguous identifiers. CT is moderately difficult (field F1: 0.48--0.81) due to complex clinical evidence with multiple p-values and outcomes. DTI L2 was not evaluated due to the annotation cost of gold-standard evidence text.
|
| 70 |
+
|
| 71 |
+
\textbf{L4 (Discrimination).} This is the critical level testing genuine understanding. The results reveal a striking \emph{gradient} across domains:
|
| 72 |
+
\begin{itemize}[nosep,leftmargin=*]
|
| 73 |
+
\item \textbf{DTI}: MCC $\leq$ 0.184 --- near random. LLMs cannot distinguish tested-inactive from untested compound--target pairs.
|
| 74 |
+
\item \textbf{PPI}: MCC 0.33--0.44 --- moderate. Some discrimination ability, but contamination analysis (Section~\ref{sec:exp-gradient}) reveals this is largely memorization.
|
| 75 |
+
\item \textbf{CT}: MCC 0.48--0.56 --- meaningful. Gemini achieves 0.563, the highest discrimination across all domains.
|
| 76 |
+
\end{itemize}
|
| 77 |
+
|
| 78 |
+
\textbf{Evidence hallucination.} Across all domains, models, and levels, 100\% of generated evidence citations are hallucinated---models never cite real PMIDs or DOIs. This universal confabulation rate persists even when models make correct predictions.
|
| 79 |
+
|
| 80 |
+
L3 reasoning results are deferred to Appendix~D due to a ceiling effect: the CT judge (GPT-4o-mini) assigns 4.4--5.0/5.0, confounding cross-domain comparison.
|
| 81 |
+
|
| 82 |
+
\begin{table}[t]
|
| 83 |
+
\centering
|
| 84 |
+
\caption{LLM cross-domain results (best configuration per domain). L3 omitted from main text (see Appendix~D). $\dagger$Contamination gap: pre-2015 minus post-2020 accuracy.}
|
| 85 |
+
\label{tab:llm_results}
|
| 86 |
+
\small
|
| 87 |
+
\begin{tabular}{@{}llccc@{}}
|
| 88 |
+
\toprule
|
| 89 |
+
\textbf{Level} & \textbf{Metric} & \textbf{DTI} & \textbf{CT} & \textbf{PPI} \\
|
| 90 |
+
\midrule
|
| 91 |
+
L1 MCQ & Accuracy & Llama\,0.991 & Gemini\,0.667 & Llama\,1.000 \\
|
| 92 |
+
L2 Extract & Field F1 & --- & Qwen\,0.81 & Haiku\,1.000 \\
|
| 93 |
+
\midrule
|
| 94 |
+
\textbf{L4 Discrim} & \textbf{MCC} & \textbf{Llama\,0.184} & \textbf{Gemini\,0.563} & \textbf{Llama\,0.441} \\
|
| 95 |
+
L4 Contam. & Gap$^\dagger$ & $<$0.15 & $<$0.15 & \textbf{0.36--0.61} \\
|
| 96 |
+
\midrule
|
| 97 |
+
Hallucination & Rate & 100\% & 100\% & 100\% \\
|
| 98 |
+
\bottomrule
|
| 99 |
+
\end{tabular}
|
| 100 |
+
\end{table}
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
\subsection{The Opacity Gradient}
|
| 104 |
+
\label{sec:exp-gradient}
|
| 105 |
+
|
| 106 |
+
The L4 discrimination results in Section~\ref{sec:exp-llm} reveal a pattern we term \emph{the opacity gradient}: LLM performance correlates not with biological task difficulty, but with the accessibility of domain data in LLM training corpora (Figure~\ref{fig:gradient}).
|
| 107 |
+
|
| 108 |
+
\textbf{DTI data is opaque.} ChEMBL bioactivity tables store compound--target interactions in structured databases behind query interfaces. Individual IC$_{50}$ values are unlikely to appear in web crawls used for LLM pretraining. Result: MCC $\leq$ 0.18 (near random).
|
| 109 |
+
|
| 110 |
+
\textbf{PPI data is crawlable.} IntAct and STRING expose protein interaction data through publicly crawlable web pages and bulk downloads frequently indexed by search engines. Result: MCC 0.33--0.44 (moderate), but contamination analysis reveals this is memorization.
|
| 111 |
+
|
| 112 |
+
\textbf{CT data is public.} ClinicalTrials.gov trial records are heavily discussed in news articles, regulatory filings, investor reports, and medical literature. Result: MCC 0.48--0.56 (meaningful discrimination).
|
| 113 |
+
|
| 114 |
+
\textbf{PPI contamination confirmed.} All five models show large temporal accuracy gaps (0.36--0.61) between pre-2015 and post-2020 interaction pairs, far exceeding the 0.15 contamination threshold~\citep{balloccu2024leak}. Pre-2015 pairs---available in training data---are classified with 40--79\% accuracy, while post-2020 pairs drop to 2--24\%. To rule out protein popularity as a confound, we stratify by protein degree (Appendix~E): contamination persists in both high-degree and low-degree protein pairs (gaps 0.33--0.58), confirming genuine memorization rather than popularity bias.
|
| 115 |
+
|
| 116 |
+
No contamination is detected for DTI (MCC too low to measure temporal effects) or CT (gaps $<$0.15).
|
| 117 |
+
|
| 118 |
+
\textbf{Implication.} L4 performance reflects training data composition, not biological reasoning capability. LLMs are fundamentally \emph{failure-blind}---they cannot distinguish tested-negative from untested pairs without prior memorization. This finding has direct implications for LLM deployment in drug discovery and clinical trial design, where hallucinated confidence in untested hypotheses could misdirect research effort.
|
| 119 |
+
|
| 120 |
+
\begin{figure}[t]
|
| 121 |
+
\centering
|
| 122 |
+
\includegraphics[width=\textwidth]{figures/fig3_l4_gradient.pdf}
|
| 123 |
+
\caption{The opacity gradient. \textbf{(a)} L4 discrimination (MCC) across domains for four common models (+Haiku for CT/PPI). Performance increases with data accessibility: DTI (opaque databases) $\to$ PPI (crawlable) $\to$ CT (public). Dashed line: MCC~=~0. \textbf{(b)} PPI contamination: pre-2015 vs.\ post-2020 accuracy. All models show $>$0.35 gaps, confirming memorization. $\Delta$ values in red.}
|
| 124 |
+
\label{fig:gradient}
|
| 125 |
+
\end{figure}
|
paper/sections/introduction.tex
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
\section{Introduction}
|
| 2 |
+
\label{sec:introduction}
|
| 3 |
+
|
| 4 |
+
Approximately 90\% of experiments produce null or inconclusive results, yet publication bias ensures the vast majority remain unreported~\citep{fanelli2012negative,mlinaric2017dealing}. This creates a systematic blind spot in biomedical AI: machine learning benchmarks treat untested compound--target pairs as negatives~\citep{huang2021therapeutics,mysinger2012dude}, an assumption that inflates reported performance by up to +0.112 LogAUC and produces models incapable of distinguishing genuinely tested inactive pairs from untested ones (MCC $\leq$ 0.18). Meanwhile, large language models confidently generate hallucinated evidence for 100\% of queried negative results, regardless of whether the experiment was ever conducted.
|
| 5 |
+
|
| 6 |
+
The consequences span three biomedical domains where negative results carry critical information. In drug--target interaction (DTI), less than 1\% of compound--target space has been experimentally tested~\citep{gaulton2017chembl}, yet benchmarks like TDC~\citep{huang2021therapeutics} and DUD-E~\citep{mysinger2012dude} assume all untested pairs are negative. In clinical trials (CT), failed trials vastly outnumber successes but lack structured representation---the AACT database~\citep{tasneem2012aact} contains 216K trials with rich failure metadata that no existing benchmark leverages. In protein--protein interaction (PPI), systematic screens such as HuRI~\citep{luck2020huri} produce millions of confirmed non-interactions that benchmarks ignore in favor of random negative sampling.
|
| 7 |
+
|
| 8 |
+
No existing benchmark evaluates negative result understanding. ChemBench~\citep{mirza2024chembench} tests chemical property reasoning; Mol-Instructions~\citep{fang2024molinstructions} evaluates molecular understanding; LAB-Bench~\citep{laurent2024labbench} measures laboratory skills; MedQA~\citep{jin2021medqa} tests clinical knowledge---but none specifically address the challenge of distinguishing tested from untested hypotheses, which is fundamental to the scientific process. Recent work on DTI negative evidence~\citep{li2025evidti} and benchmark quality~\citep{volkov2025welqrate,tran2020litpcba} highlights the growing recognition of this gap, but remains limited to single domains.
|
| 9 |
+
|
| 10 |
+
We address this gap with four contributions:
|
| 11 |
+
|
| 12 |
+
\begin{enumerate}[nosep,leftmargin=*]
|
| 13 |
+
\item \textbf{NegBioDB}: the first multi-domain database of experimentally confirmed negative results, aggregating 32.9M entries from 12 data sources across DTI, CT, and PPI with four confidence tiers (Section~\ref{sec:database}). Released under CC BY-SA 4.0 with Croissant metadata.
|
| 14 |
+
|
| 15 |
+
\item \textbf{NegBioBench}: a dual ML+LLM benchmark with 4 evaluation levels across 3 domains, totaling 421 experiments (180 ML + 241 LLM) that systematically test both predictive models and language models on negative evidence understanding (Section~\ref{sec:benchmark}).
|
| 16 |
+
|
| 17 |
+
\item \textbf{Negative source inflation}: we demonstrate that control negatives inflate DTI model LogAUC by +0.112 and show model-dependent effects in PPI ($-$0.11 to $+$0.05 LogAUC) and CT ($-$0.16 to $-$0.24 AUROC), challenging the universal assumption that negative source does not matter (Section~\ref{sec:exp-inflation}).
|
| 18 |
+
|
| 19 |
+
\item \textbf{The opacity gradient}: L4 discrimination performance correlates with data accessibility in LLM training corpora (DTI MCC 0.18 $\to$ PPI 0.44 $\to$ CT 0.56), with PPI contamination confirmed as genuine memorization via temporal stratification and protein degree analysis (Section~\ref{sec:exp-gradient}).
|
| 20 |
+
\end{enumerate}
|
pyproject.toml
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "negbiodb"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = "Negative Results Database for Drug-Target Interactions, Clinical Trial Failures, Protein-Protein Interactions, and Gene Essentiality"
|
| 5 |
+
license = {text = "CC-BY-SA-4.0"}
|
| 6 |
+
requires-python = ">=3.11"
|
| 7 |
+
dependencies = [
|
| 8 |
+
"numpy>=2.0",
|
| 9 |
+
"pandas>=2.0",
|
| 10 |
+
"pyarrow>=14.0",
|
| 11 |
+
"tqdm>=4.60",
|
| 12 |
+
"scikit-learn>=1.3",
|
| 13 |
+
"pyyaml>=6.0",
|
| 14 |
+
"requests>=2.28",
|
| 15 |
+
"rdkit>=2024.3",
|
| 16 |
+
"chembl-downloader>=0.4",
|
| 17 |
+
"mlcroissant>=1.0",
|
| 18 |
+
"rapidfuzz>=3.0",
|
| 19 |
+
]
|
| 20 |
+
|
| 21 |
+
[project.optional-dependencies]
|
| 22 |
+
dev = [
|
| 23 |
+
"pytest>=7.0",
|
| 24 |
+
"pytest-cov>=4.0",
|
| 25 |
+
]
|
| 26 |
+
ml = [
|
| 27 |
+
"torch>=2.2",
|
| 28 |
+
"torch-geometric>=2.5",
|
| 29 |
+
"xgboost>=2.0",
|
| 30 |
+
"pymetis>=2023.1",
|
| 31 |
+
# torch-scatter and torch-sparse require CUDA-specific wheels;
|
| 32 |
+
# install manually on HPC: see slurm/setup_env.sh
|
| 33 |
+
]
|
| 34 |
+
|
| 35 |
+
[build-system]
|
| 36 |
+
requires = ["hatchling"]
|
| 37 |
+
build-backend = "hatchling.build"
|
| 38 |
+
|
| 39 |
+
[tool.hatch.build.targets.wheel]
|
| 40 |
+
packages = ["src/negbiodb", "src/negbiodb_ct", "src/negbiodb_ppi", "src/negbiodb_depmap"]
|
| 41 |
+
|
| 42 |
+
[tool.pytest.ini_options]
|
| 43 |
+
testpaths = ["tests"]
|
| 44 |
+
pythonpath = ["src"]
|
| 45 |
+
markers = ["integration: marks tests requiring network access (deselect with '-m \"not integration\"')"]
|
scripts/analyze_l4_contamination.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Analyze L4 temporal accuracy for contamination detection.
|
| 3 |
+
|
| 4 |
+
Reads existing results.json files from results/llm/l4_*/ and reports
|
| 5 |
+
accuracy_pre_2023 vs accuracy_post_2024 gap per model/config.
|
| 6 |
+
|
| 7 |
+
A gap > 0.15 flags potential training data contamination.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import json
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
PROJECT_ROOT = Path(__file__).resolve().parent.parent
RESULTS_DIR = PROJECT_ROOT / "results" / "llm"
# Accuracy gap above which a run is flagged as potentially contaminated.
THRESHOLD = 0.15


def _fmt(value, width):
    """Right-align *value* to 3 decimals, or a placeholder when it is None.

    The upstream results.json files do not always contain every metric
    (in particular the temporal accuracy keys), so any value may be None.
    """
    if value is None:
        return f"{'--':>{width}}"
    return f"{value:>{width}.3f}"


def _model_name(run_name):
    """Derive the model identifier from a run dir name.

    Run dirs look like 'l4_<model>_fs<k>_zero-shot' / 'l4_<model>_fs<k>_3-shot';
    strip the 'l4_' prefix, the shot suffix, and the few-shot suffix.
    """
    stem = run_name[3:]  # strip "l4_"
    if stem.endswith("_zero-shot"):
        stem = stem[: -len("_zero-shot")]
    elif stem.endswith("_3-shot"):
        stem = stem[: -len("_3-shot")]
    return stem.rsplit("_fs", 1)[0]


def collect_rows(results_dir=RESULTS_DIR):
    """Gather per-run metrics from ``<results_dir>/l4_*/results.json``.

    Returns a list of dicts with keys run/accuracy/mcc/pre_2023/post_2024/
    gap/flag. ``gap`` and ``flag`` are None when the temporal accuracy keys
    are absent from a run's results.json. Returns an empty list when the
    results directory does not exist.
    """
    rows = []
    if not results_dir.is_dir():
        return rows
    for run_dir in sorted(results_dir.iterdir()):
        if not run_dir.is_dir() or not run_dir.name.startswith("l4_"):
            continue
        results_file = run_dir / "results.json"
        if not results_file.exists():
            continue

        with open(results_file) as f:
            m = json.load(f)

        pre = m.get("accuracy_pre_2023")
        post = m.get("accuracy_post_2024")
        gap = round(pre - post, 4) if pre is not None and post is not None else None

        rows.append({
            "run": run_dir.name,
            "accuracy": m.get("accuracy"),
            "mcc": m.get("mcc"),
            "pre_2023": pre,
            "post_2024": post,
            "gap": gap,
            "flag": gap > THRESHOLD if gap is not None else None,
        })
    return rows


def main(results_dir=RESULTS_DIR):
    """Print a per-run contamination table plus a per-model gap summary.

    A pre/post temporal accuracy gap above THRESHOLD flags potential
    training-data contamination. Runs missing metrics are printed with
    placeholder dashes instead of crashing the table formatting.
    """
    from collections import defaultdict

    rows = collect_rows(results_dir)
    if not rows:
        print("No L4 results found.")
        return

    # Per-run table.
    print(f"{'Run':<55} {'Acc':>5} {'MCC':>6} {'Pre23':>6} {'Post24':>6} {'Gap':>6} {'Flag'}")
    print("-" * 100)
    for r in rows:
        flag_str = "YES" if r["flag"] else ("NO" if r["flag"] is not None else "N/A")
        print(
            f"{r['run']:<55} "
            f"{_fmt(r['accuracy'], 5)} "
            f"{_fmt(r['mcc'], 6)} "
            f"{_fmt(r['pre_2023'], 6)} "
            f"{_fmt(r['post_2024'], 6)} "
            f"{_fmt(r['gap'], 6)} "
            f"{flag_str}"
        )

    # Per-model mean gap (averaged over shot configurations).
    model_gaps = defaultdict(list)
    for r in rows:
        if r["gap"] is not None:
            model_gaps[_model_name(r["run"])].append(r["gap"])

    print("\n--- Summary by Model ---")
    print(f"{'Model':<40} {'Mean Gap':>8} {'Flag'}")
    print("-" * 55)
    for model, gaps in sorted(model_gaps.items()):
        mean_gap = sum(gaps) / len(gaps)
        flag = "YES" if mean_gap > THRESHOLD else "NO"
        print(f"{model:<40} {mean_gap:>8.4f} {flag}")

    print(f"\nThreshold: {THRESHOLD}")
    flagged = sum(1 for r in rows if r["flag"])
    print(f"Flagged runs: {flagged}/{len(rows)}")


if __name__ == "__main__":
    main()
|
scripts/build_compound_names.py
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Build compound name cache from ChEMBL + PubChem synonyms.
|
| 3 |
+
|
| 4 |
+
Output: exports/compound_names.parquet
|
| 5 |
+
Columns: compound_id, chembl_id, pubchem_cid, pref_name, name_source
|
| 6 |
+
|
| 7 |
+
Priority: ChEMBL pref_name > ChEMBL compound_records > PubChem synonym > None
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import argparse
|
| 11 |
+
import json
|
| 12 |
+
import sqlite3
|
| 13 |
+
import time
|
| 14 |
+
import urllib.request
|
| 15 |
+
import urllib.error
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
|
| 18 |
+
import pandas as pd
|
| 19 |
+
|
| 20 |
+
PROJECT_ROOT = Path(__file__).resolve().parent.parent
|
| 21 |
+
NEGBIODB_PATH = PROJECT_ROOT / "data" / "negbiodb.db"
|
| 22 |
+
CHEMBL_PATH = PROJECT_ROOT / "data" / "chembl" / "chembl_36.db"
|
| 23 |
+
OUTPUT_PATH = PROJECT_ROOT / "exports" / "compound_names.parquet"
|
| 24 |
+
|
| 25 |
+
PUBCHEM_BATCH_SIZE = 100 # CIDs per request (conservative)
|
| 26 |
+
PUBCHEM_DELAY = 0.25 # seconds between requests (PubChem allows 5/sec)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def load_chembl_names(chembl_db: Path, our_chembl_ids: set[str]) -> dict[str, tuple[str, str]]:
    """Load ChEMBL ID -> (name, source) from pref_name + compound_records.

    Priority: pref_name > shortest compound_records name (ties broken
    lexicographically so results are deterministic).

    Note: a plain SQL ``MIN(compound_name)`` would select the
    *lexicographically* smallest name, not the shortest, so the shortest-name
    selection is done in Python after fetching the candidate names.
    """
    conn = sqlite3.connect(str(chembl_db))
    try:
        # Phase 1a: pref_name (highest quality - curated drug names)
        pref_names = dict(
            conn.execute(
                "SELECT chembl_id, pref_name FROM molecule_dictionary "
                "WHERE pref_name IS NOT NULL"
            ).fetchall()
        )

        # Phase 1b: compound_records names (broader coverage)
        # Get molregno -> chembl_id for our compounds
        molregno_to_chembl = {}
        for chembl_id, molregno in conn.execute(
            "SELECT chembl_id, molregno FROM molecule_dictionary"
        ):
            if chembl_id in our_chembl_ids:
                molregno_to_chembl[molregno] = chembl_id

        # Batch query compound_records; keep the shortest valid name per
        # molregno (length < 200 filters out IUPAC-style monster names).
        cr_names: dict[str, str] = {}
        molregnos = list(molregno_to_chembl)
        batch_size = 5000
        for i in range(0, len(molregnos), batch_size):
            batch = molregnos[i:i + batch_size]
            placeholders = ",".join("?" * len(batch))
            rows = conn.execute(
                f"SELECT molregno, compound_name "
                f"FROM compound_records "
                f"WHERE molregno IN ({placeholders}) "
                f"  AND compound_name IS NOT NULL "
                f"  AND compound_name != '' "
                f"  AND LENGTH(compound_name) < 200",
                batch,
            ).fetchall()
            # Shortest name wins; equal lengths fall back to lexicographic
            # order so repeated runs are deterministic.
            best: dict[int, str] = {}
            for molregno, name in rows:
                current = best.get(molregno)
                if current is None or (len(name), name) < (len(current), current):
                    best[molregno] = name
            for molregno, name in best.items():
                cr_names[molregno_to_chembl[molregno]] = name
    finally:
        # Ensure the ChEMBL connection is released even if a query fails.
        conn.close()

    # Merge: pref_name > compound_records
    result = {}
    for chembl_id in our_chembl_ids:
        if chembl_id in pref_names:
            result[chembl_id] = (pref_names[chembl_id], "chembl_pref")
        elif chembl_id in cr_names:
            result[chembl_id] = (cr_names[chembl_id], "chembl_record")

    return result
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def load_negbiodb_compounds(db_path: Path) -> pd.DataFrame:
    """Return the compound identifier columns from the NegBioDB compounds table.

    Columns: compound_id, chembl_id, pubchem_cid.
    """
    connection = sqlite3.connect(str(db_path))
    query = "SELECT compound_id, chembl_id, pubchem_cid FROM compounds"
    frame = pd.read_sql_query(query, connection)
    connection.close()
    return frame
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def fetch_pubchem_names_batch(cids: list[int]) -> dict[int, str]:
    """Fetch preferred names for a batch of PubChem CIDs via PUG REST synonyms.

    Best-effort: on any request or decode failure the batch is logged and
    skipped (an empty dict is returned) rather than aborting the caller.

    Args:
        cids: PubChem compound identifiers to query (one POST for all).

    Returns:
        Mapping CID -> first synonym, keeping only names that are not
        purely numeric (registry numbers) and shorter than 200 chars.
    """
    if not cids:
        return {}

    url = "https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/synonyms/JSON"
    data = f"cid={','.join(str(c) for c in cids)}".encode("utf-8")

    req = urllib.request.Request(
        url,
        data=data,
        method="POST",
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )

    try:
        with urllib.request.urlopen(req, timeout=30) as resp:
            result = json.loads(resp.read())
    except (
        urllib.error.HTTPError,
        urllib.error.URLError,
        TimeoutError,
        # A 200 response with a non-JSON body previously crashed the run;
        # treat it like any other transient failure.
        json.JSONDecodeError,
    ) as e:
        print(f" PubChem batch error ({len(cids)} CIDs): {e}")
        return {}

    names = {}
    for entry in result.get("InformationList", {}).get("Information", []):
        cid = entry.get("CID")
        synonyms = entry.get("Synonym", [])
        if cid and synonyms:
            name = synonyms[0]
            # Reject purely numeric "names" and absurdly long IUPAC strings.
            if not name.isdigit() and len(name) < 200:
                names[cid] = name
    return names
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def fetch_pubchem_names(
    cids: list[int], batch_size: int = PUBCHEM_BATCH_SIZE
) -> dict[int, str]:
    """Resolve preferred names for every CID, querying PubChem in batches."""
    resolved: dict[int, str] = {}
    n_batches = (len(cids) + batch_size - 1) // batch_size

    for batch_idx, start in enumerate(range(0, len(cids), batch_size), start=1):
        chunk = cids[start : start + batch_size]

        # Progress line on the first batch and then every 50th one.
        if batch_idx == 1 or batch_idx % 50 == 0:
            print(
                f" PubChem batch {batch_idx}/{n_batches} "
                f"({len(resolved)} names so far)"
            )

        resolved.update(fetch_pubchem_names_batch(chunk))
        time.sleep(PUBCHEM_DELAY)

    return resolved
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def main():
    """Build the compound name cache (ChEMBL first, then PubChem fallback).

    Writes a parquet file mapping each NegBioDB compound to a preferred
    name plus the source of that name (chembl_pref / chembl_record /
    pubchem).
    """
    parser = argparse.ArgumentParser(description="Build compound name cache")
    parser.add_argument(
        "--skip-pubchem",
        action="store_true",
        help="Skip PubChem API calls (ChEMBL only)",
    )
    parser.add_argument(
        "--pubchem-limit",
        type=int,
        default=0,
        help="Max PubChem CIDs to query (0=all)",
    )
    args = parser.parse_args()

    print("Loading NegBioDB compounds...")
    df = load_negbiodb_compounds(NEGBIODB_PATH)
    print(f" {len(df)} compounds total")
    print(f" {df['chembl_id'].notna().sum()} with chembl_id")
    print(f" {df['pubchem_cid'].notna().sum()} with pubchem_cid")

    # Phase 1: ChEMBL (pref_name + compound_records)
    our_chembl_ids = set(df.loc[df["chembl_id"].notna(), "chembl_id"])
    print(f"\nPhase 1: ChEMBL name lookup ({len(our_chembl_ids)} compounds)...")
    chembl_names = load_chembl_names(CHEMBL_PATH, our_chembl_ids)
    # chembl_names maps chembl_id -> (name, source) where source is
    # "chembl_pref" or "chembl_record".
    n_pref = sum(1 for _, s in chembl_names.values() if s == "chembl_pref")
    n_rec = sum(1 for _, s in chembl_names.values() if s == "chembl_record")
    print(f" pref_name: {n_pref}")
    print(f" compound_records: {n_rec}")
    print(f" Total: {len(chembl_names)}/{len(our_chembl_ids)}")

    df["pref_name"] = None
    df["name_source"] = None

    # Attach ChEMBL-derived names row by row (only rows with a chembl_id).
    mask_chembl = df["chembl_id"].notna()
    for idx in df[mask_chembl].index:
        cid = df.at[idx, "chembl_id"]
        if cid in chembl_names:
            name, source = chembl_names[cid]
            df.at[idx, "pref_name"] = name
            df.at[idx, "name_source"] = source

    # Phase 2: PubChem synonyms (for compounds without ChEMBL names)
    if not args.skip_pubchem:
        need_name = df["pref_name"].isna() & df["pubchem_cid"].notna()
        cids_to_query = (
            df.loc[need_name, "pubchem_cid"].dropna().astype(int).tolist()
        )

        # Optional cap, mainly for quick test runs.
        if args.pubchem_limit > 0:
            cids_to_query = cids_to_query[: args.pubchem_limit]

        print(f"\nPhase 2: PubChem synonym lookup ({len(cids_to_query)} CIDs)...")

        if cids_to_query:
            pubchem_names = fetch_pubchem_names(cids_to_query)
            print(f" Retrieved {len(pubchem_names)} names from PubChem")

            for idx in df[need_name].index:
                cid = df.at[idx, "pubchem_cid"]
                if pd.notna(cid) and int(cid) in pubchem_names:
                    df.at[idx, "pref_name"] = pubchem_names[int(cid)]
                    df.at[idx, "name_source"] = "pubchem"
    else:
        print("\nPhase 2: Skipped (--skip-pubchem)")

    # Summary
    named = df["pref_name"].notna().sum()
    print(f"\n=== Summary ===")
    print(f"Total compounds: {len(df)}")
    print(f"With name: {named} ({100 * named / len(df):.1f}%)")
    by_source = df["name_source"].value_counts()
    for source, count in by_source.items():
        print(f" {source}: {count}")
    print(f"Without name: {len(df) - named}")

    # Save
    OUTPUT_PATH.parent.mkdir(parents=True, exist_ok=True)
    df.to_parquet(OUTPUT_PATH, index=False)
    print(f"\nSaved to {OUTPUT_PATH}")
    print(f" Size: {OUTPUT_PATH.stat().st_size / 1024 / 1024:.1f} MB")
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
scripts/build_l1_dataset.py
ADDED
|
@@ -0,0 +1,845 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Build L1 MCQ dataset for LLM benchmark.
|
| 3 |
+
|
| 4 |
+
Generates 2,000 multiple-choice questions across 4 classes:
|
| 5 |
+
A) Active (400) — confirmed active, ChEMBL positives pChEMBL ≥ 6
|
| 6 |
+
B) Inactive (800) — confirmed inactive, NegBioDB silver tier
|
| 7 |
+
C) Inconclusive (400) — ambiguous evidence (bronze tier, borderline)
|
| 8 |
+
D) Conditionally active (400) — cross-target selectivity compounds
|
| 9 |
+
|
| 10 |
+
Difficulty: Easy 40% / Medium 35% / Hard 25%
|
| 11 |
+
Split: 200 few-shot (50/class) + 200 val (50/class) + 1,600 test
|
| 12 |
+
|
| 13 |
+
Output: exports/llm_benchmarks/l1_mcq.jsonl
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import argparse
|
| 17 |
+
import json
|
| 18 |
+
import random
|
| 19 |
+
import sqlite3
|
| 20 |
+
import time
|
| 21 |
+
import urllib.request
|
| 22 |
+
import urllib.error
|
| 23 |
+
from pathlib import Path
|
| 24 |
+
|
| 25 |
+
import pandas as pd
|
| 26 |
+
|
| 27 |
+
# All paths are resolved relative to the repository root (scripts/..).
PROJECT_ROOT = Path(__file__).resolve().parent.parent
NEGBIODB_PATH = PROJECT_ROOT / "data" / "negbiodb.db"
CHEMBL_PATH = PROJECT_ROOT / "data" / "chembl" / "chembl_36.db"
POSITIVES_PATH = PROJECT_ROOT / "exports" / "chembl_positives_pchembl6.parquet"
M1_PATH = PROJECT_ROOT / "exports" / "negbiodb_m1_balanced.parquet"
NAMES_PATH = PROJECT_ROOT / "exports" / "compound_names.parquet"
OUTPUT_PATH = PROJECT_ROOT / "exports" / "llm_benchmarks" / "l1_mcq.jsonl"

# Class sizes (A/B/C/D question counts; sum = 2,000 per module docstring)
N_ACTIVE = 400
N_INACTIVE = 800
N_INCONCLUSIVE = 400
N_CONDITIONAL = 400

# L-7: Max times a compound can appear within a single class.
# Set to 12 to accommodate DAVIS kinase panel (68 compounds × 375 targets).
# Prevents extreme dominance while allowing natural assay panel structure.
MAX_PER_COMPOUND = 12

# Difficulty proportions (must sum to 1.0; "hard" takes the remainder
# after integer truncation in the selectors)
FRAC_EASY = 0.40
FRAC_MEDIUM = 0.35
FRAC_HARD = 0.25
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def load_compound_names(path: Path | None = None) -> dict:
    """Load the compound name cache as compound_id -> pref_name.

    Args:
        path: Parquet file produced by build_compound_names.py.
            Defaults to ``NAMES_PATH`` when omitted (backward compatible).

    Returns:
        Mapping of compound_id (int) to preferred name; rows with a
        missing ``pref_name`` are skipped.
    """
    df = pd.read_parquet(path if path is not None else NAMES_PATH)
    # Vectorized: drop unnamed rows once instead of per-row iterrows checks.
    named = df.dropna(subset=["pref_name"])
    return {
        int(cid): name
        for cid, name in zip(named["compound_id"], named["pref_name"])
    }
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def load_target_info(db_path: Path) -> dict:
    """Load target metadata keyed by target_id.

    Args:
        db_path: Path to the NegBioDB SQLite file (must contain ``targets``).

    Returns:
        Mapping target_id -> {"uniprot", "gene_symbol", "family"}.
    """
    conn = sqlite3.connect(str(db_path))
    try:
        # try/finally so the connection is closed even when the query fails.
        rows = conn.execute(
            "SELECT target_id, uniprot_accession, gene_symbol, target_family "
            "FROM targets"
        ).fetchall()
    finally:
        conn.close()
    return {
        tid: {"uniprot": uniprot, "gene_symbol": gene, "family": family}
        for tid, uniprot, gene, family in rows
    }
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def load_compound_ids(db_path: Path) -> tuple[dict, dict]:
    """Load compound lookups: inchikey -> compound_id and chembl_id -> compound_id.

    Args:
        db_path: Path to the NegBioDB SQLite file (must contain ``compounds``).

    Returns:
        Tuple ``(ik_map, chembl_map)``. Rows with a NULL/empty inchikey or
        chembl_id are omitted from the respective map.

    Note:
        The original annotated ``-> dict`` but has always returned a
        2-tuple; the annotation is corrected here. The unused
        ``pubchem_cid`` column is no longer fetched.
    """
    conn = sqlite3.connect(str(db_path))
    try:
        rows = conn.execute(
            "SELECT compound_id, inchikey, chembl_id FROM compounds"
        ).fetchall()
    finally:
        conn.close()
    ik_map = {}
    chembl_map = {}
    for cid, ik, chembl in rows:
        if ik:
            ik_map[ik] = cid
        if chembl:
            chembl_map[chembl] = cid
    return ik_map, chembl_map
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def fetch_pubchem_names(cids: list[int]) -> dict[int, str]:
    """Fetch names for PubChem CIDs not in the cache (PUG REST synonyms).

    Best-effort: a failed batch is logged and skipped; remaining batches
    still run.

    Args:
        cids: PubChem compound identifiers to resolve.

    Returns:
        Mapping CID -> first synonym (purely numeric synonyms skipped).
    """
    if not cids:
        return {}
    all_names = {}
    batch_size = 100
    url = "https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/synonyms/JSON"
    for i in range(0, len(cids), batch_size):
        batch = cids[i : i + batch_size]
        data = f"cid={','.join(str(c) for c in batch)}".encode()
        req = urllib.request.Request(
            url,
            data=data,
            method="POST",
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        try:
            with urllib.request.urlopen(req, timeout=30) as resp:
                result = json.loads(resp.read())
        except (
            urllib.error.HTTPError,
            urllib.error.URLError,
            TimeoutError,
            json.JSONDecodeError,
        ) as e:
            # Narrowed from a bare `except Exception: pass`: dropped
            # batches are now visible in the log instead of silent.
            print(f" PubChem batch error ({len(batch)} CIDs): {e}")
            result = {}
        # Parsing runs outside the try so real bugs are not swallowed;
        # .get chains make missing keys harmless.
        for entry in result.get("InformationList", {}).get("Information", []):
            cid = entry.get("CID")
            synonyms = entry.get("Synonym", [])
            if cid and synonyms and not synonyms[0].isdigit():
                all_names[cid] = synonyms[0]
        time.sleep(0.3)  # stay under PubChem's request-rate limit
    return all_names
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
# ── Class A: Active ──────────────────────────────────────────────────────────
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def select_active(positives_df: pd.DataFrame, names: dict,
                  ik_to_cid: dict, n: int, seed: int) -> list[dict]:
    """Select n active compound-target pairs from ChEMBL positives.

    Args:
        positives_df: ChEMBL positives (pChEMBL >= 6) with columns
            inchikey, smiles, uniprot_id, activity_type,
            activity_value_nm, pchembl_value, publication_year.
        names: compound_id -> preferred name cache.
        ik_to_cid: inchikey -> NegBioDB compound_id.
        n: number of MCQ records to produce.
        seed: RNG seed for reproducible sampling/shuffling.

    Returns:
        List of up to n MCQ record dicts (class "active", answer "A").
    """
    rng = random.Random(seed)
    df = positives_df.copy()

    # Map inchikey -> compound_id for name lookup
    df["compound_id"] = df["inchikey"].map(ik_to_cid)

    # Filter to compounds with names (for better MCQ quality)
    df["compound_name"] = df["compound_id"].map(names)
    named = df[df["compound_name"].notna()].copy()
    print(f" Active: {len(named)}/{len(df)} have names")

    # Stratify by difficulty (based on pchembl)
    # Higher potency reads as an "easier" active call.
    easy = named[named["pchembl_value"] > 7.5].copy()
    medium = named[
        (named["pchembl_value"] > 6.5) & (named["pchembl_value"] <= 7.5)
    ].copy()
    hard = named[named["pchembl_value"] <= 6.5].copy()

    n_easy = int(n * FRAC_EASY)
    n_medium = int(n * FRAC_MEDIUM)
    # "hard" absorbs the rounding remainder so counts total exactly n.
    n_hard = n - n_easy - n_medium

    # Sample from each difficulty band
    selected = []
    for subset, count, diff in [
        (easy, n_easy, "easy"),
        (medium, n_medium, "medium"),
        (hard, n_hard, "hard"),
    ]:
        # Diversify: max 1 per UniProt to spread across targets
        by_target = subset.groupby("uniprot_id")
        pool = []
        for _, group in by_target:
            pool.append(group.sample(1, random_state=seed))
        if pool:
            pool_df = pd.concat(pool)
            if len(pool_df) < count:
                # Need more — allow duplicates per target
                extra = subset[~subset.index.isin(pool_df.index)]
                # Cap the top-up pool at 3x the requested count.
                pool_df = pd.concat([pool_df, extra]).head(count * 3)
            sampled = pool_df.sample(min(count, len(pool_df)), random_state=seed)
        else:
            sampled = subset.sample(min(count, len(subset)), random_state=seed)

        for _, row in sampled.iterrows():
            selected.append(
                {
                    "class": "active",
                    "correct_answer": "A",
                    "difficulty": diff,
                    "compound_name": row["compound_name"],
                    "compound_smiles": row["smiles"],
                    "compound_inchikey": row["inchikey"],
                    "target_uniprot": row["uniprot_id"],
                    "activity_type": row["activity_type"],
                    "activity_value_nm": float(row["activity_value_nm"]),
                    "pchembl_value": float(row["pchembl_value"]),
                    "publication_year": (
                        int(row["publication_year"])
                        if pd.notna(row.get("publication_year"))
                        else None
                    ),
                    "evidence_quality": "gold",
                    "source_db": "ChEMBL",
                }
            )

    # Shuffle so difficulty bands are interleaved, then trim to n.
    rng.shuffle(selected)
    return selected[:n]
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
# ── Class B: Inactive ────────────────────────────────────────────────────────
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def select_inactive(
    db_path: Path, names: dict, target_info: dict, n: int, seed: int
) -> list[dict]:
    """Select n inactive pairs from NegBioDB silver tier.

    Args:
        db_path: NegBioDB SQLite file.
        names: compound_id -> preferred name cache.
        target_info: target_id -> {uniprot, gene_symbol, family}.
        n: number of MCQ records to produce.
        seed: RNG seed for reproducible sampling/shuffling.

    Returns:
        List of up to n MCQ record dicts (class "inactive", answer "B").
    """
    conn = sqlite3.connect(str(db_path))

    # Query silver-tier pairs for compounds with chembl_id (ensures names)
    # Oversample 10x so name/difficulty filtering still leaves enough rows.
    rows = conn.execute(
        """
        SELECT ctp.compound_id, ctp.target_id,
               c.canonical_smiles, c.inchikey,
               ctp.num_assays, ctp.num_sources, ctp.median_pchembl,
               ctp.min_activity_value, ctp.max_activity_value,
               ctp.earliest_year, ctp.best_confidence
        FROM compound_target_pairs ctp
        JOIN compounds c ON ctp.compound_id = c.compound_id
        WHERE ctp.best_confidence = 'silver'
          AND c.chembl_id IS NOT NULL
        ORDER BY RANDOM()
        LIMIT ?
        """,
        (n * 10,),
    ).fetchall()
    conn.close()

    cols = [
        "compound_id", "target_id", "smiles", "inchikey",
        "num_assays", "num_sources", "median_pchembl",
        "min_activity_value", "max_activity_value",
        "earliest_year", "best_confidence",
    ]
    df = pd.DataFrame(rows, columns=cols)

    # Add names and target info
    df["compound_name"] = df["compound_id"].map(names)
    named = df[df["compound_name"].notna()].copy()
    print(f" Inactive: {len(named)}/{len(df)} have names")

    # Add target gene symbols
    named["gene_symbol"] = named["target_id"].map(
        lambda tid: target_info.get(tid, {}).get("gene_symbol")
    )
    named["target_family"] = named["target_id"].map(
        lambda tid: target_info.get(tid, {}).get("family")
    )
    named["target_uniprot"] = named["target_id"].map(
        lambda tid: target_info.get(tid, {}).get("uniprot")
    )

    # Difficulty based on evidence strength
    # Easy: many assays, clear inactive (low pchembl or null)
    # Medium: fewer assays, moderate evidence
    # Hard: single assay, near threshold
    # Note: rules are applied in order, later assignments overwrite earlier.
    named["difficulty"] = "medium"
    named.loc[named["num_assays"] >= 3, "difficulty"] = "easy"
    named.loc[named["num_sources"] >= 2, "difficulty"] = "easy"
    named.loc[
        (named["num_assays"] == 1) & (named["num_sources"] == 1), "difficulty"
    ] = "hard"

    n_easy = int(n * FRAC_EASY)
    n_medium = int(n * FRAC_MEDIUM)
    # "hard" absorbs the rounding remainder so counts total exactly n.
    n_hard = n - n_easy - n_medium

    selected = []
    seen_compounds = set()
    for diff, count in [("easy", n_easy), ("medium", n_medium), ("hard", n_hard)]:
        pool = named[named["difficulty"] == diff]
        # Diversify compounds
        pool = pool[~pool["compound_id"].isin(seen_compounds)]
        sampled = pool.sample(min(count, len(pool)), random_state=seed)
        seen_compounds.update(sampled["compound_id"])

        for _, row in sampled.iterrows():
            # Human-readable summary of the inactivity evidence.
            activity_desc = _format_activity(
                row["min_activity_value"], row["max_activity_value"],
                row["median_pchembl"]
            )
            selected.append(
                {
                    "class": "inactive",
                    "correct_answer": "B",
                    "difficulty": diff,
                    "compound_name": row["compound_name"],
                    "compound_smiles": row["smiles"],
                    "compound_inchikey": row["inchikey"],
                    "target_uniprot": row["target_uniprot"],
                    "target_gene": row["gene_symbol"],
                    "target_family": row["target_family"],
                    "num_assays": int(row["num_assays"]),
                    "num_sources": int(row["num_sources"]),
                    "activity_description": activity_desc,
                    "pchembl_value": (
                        float(row["median_pchembl"])
                        if pd.notna(row["median_pchembl"])
                        else None
                    ),
                    "publication_year": (
                        int(row["earliest_year"])
                        if pd.notna(row["earliest_year"])
                        else None
                    ),
                    "evidence_quality": "silver",
                    "source_db": "NegBioDB",
                }
            )

    # Shuffle so difficulty bands are interleaved, then trim to n.
    random.Random(seed).shuffle(selected)
    return selected[:n]
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def _format_activity(min_val, max_val, median_pchembl):
|
| 316 |
+
"""Format activity description for inactive pairs."""
|
| 317 |
+
parts = []
|
| 318 |
+
if pd.notna(min_val):
|
| 319 |
+
if min_val >= 10000:
|
| 320 |
+
parts.append("No significant activity at 10 µM")
|
| 321 |
+
elif min_val >= 1000:
|
| 322 |
+
parts.append(f"Weak activity (>{min_val:.0f} nM)")
|
| 323 |
+
else:
|
| 324 |
+
# min_val < 1000 nM in an inactive pair means inconsistent assay results
|
| 325 |
+
parts.append(
|
| 326 |
+
f"Best measurement: {min_val:.0f} nM "
|
| 327 |
+
f"(inconsistent across assays; classified inactive at 10 µM threshold)"
|
| 328 |
+
)
|
| 329 |
+
if pd.notna(median_pchembl) and median_pchembl > 0:
|
| 330 |
+
parts.append(f"pChEMBL: {median_pchembl:.1f}")
|
| 331 |
+
if not parts:
|
| 332 |
+
parts.append("Inactive (below detection threshold)")
|
| 333 |
+
return "; ".join(parts)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
# ── Class C: Inconclusive ────────────────────────────────────────────────────
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def select_inconclusive(
    db_path: Path, names: dict, target_info: dict, n: int, seed: int
) -> list[dict]:
    """Select n inconclusive pairs (bronze tier + borderline silver).

    Args:
        db_path: NegBioDB SQLite file.
        names: compound_id -> preferred name cache. NOTE: mutated in
            place — names fetched from PubChem for DAVIS compounds are
            added to this dict.
        target_info: target_id -> {uniprot, gene_symbol, family}.
        n: number of MCQ records to produce.
        seed: RNG seed for reproducible sampling/shuffling.

    Returns:
        List of up to n MCQ record dicts (class "inconclusive", answer "C").
    """
    conn = sqlite3.connect(str(db_path))

    # Part 1: Bronze tier (DAVIS) — all are single-assay Kd at threshold
    # DAVIS compounds have no chembl_id, so fetch PubChem names on demand
    bronze_rows = conn.execute(
        """
        SELECT ctp.compound_id, ctp.target_id,
               c.canonical_smiles, c.inchikey, c.pubchem_cid,
               ctp.num_assays, ctp.num_sources, ctp.median_pchembl,
               ctp.min_activity_value, ctp.max_activity_value,
               ctp.earliest_year
        FROM compound_target_pairs ctp
        JOIN compounds c ON ctp.compound_id = c.compound_id
        WHERE ctp.best_confidence = 'bronze'
        ORDER BY RANDOM()
        LIMIT ?
        """,
        (n * 3,),
    ).fetchall()

    # Fetch PubChem names for DAVIS compounds missing from cache
    bronze_cids_needing_names = set()
    for row in bronze_rows:
        cid = row[0]  # compound_id
        pcid = row[4]  # pubchem_cid
        if cid not in names and pcid:
            bronze_cids_needing_names.add(int(pcid))

    if bronze_cids_needing_names:
        print(f" Fetching PubChem names for {len(bronze_cids_needing_names)} DAVIS compounds...")
        pc_names = fetch_pubchem_names(list(bronze_cids_needing_names))
        # Map pubchem_cid -> compound_id for update
        pcid_to_compid = {
            int(row[4]): row[0]
            for row in bronze_rows
            if row[4]
        }
        # Backfill the shared name cache (intentional in-place mutation).
        for pcid, name in pc_names.items():
            if pcid in pcid_to_compid:
                names[pcid_to_compid[pcid]] = name

    # Part 2: Borderline silver — single assay, activity near threshold, named
    borderline_rows = conn.execute(
        """
        SELECT ctp.compound_id, ctp.target_id,
               c.canonical_smiles, c.inchikey, c.pubchem_cid,
               ctp.num_assays, ctp.num_sources, ctp.median_pchembl,
               ctp.min_activity_value, ctp.max_activity_value,
               ctp.earliest_year
        FROM compound_target_pairs ctp
        JOIN compounds c ON ctp.compound_id = c.compound_id
        WHERE ctp.best_confidence = 'silver'
          AND c.chembl_id IS NOT NULL
          AND ctp.num_assays = 1
          AND ctp.num_sources = 1
          AND ctp.min_activity_value BETWEEN 5000 AND 15000
        ORDER BY RANDOM()
        LIMIT ?
        """,
        (n * 5,),
    ).fetchall()
    conn.close()

    cols = [
        "compound_id", "target_id", "smiles", "inchikey", "pubchem_cid",
        "num_assays", "num_sources", "median_pchembl",
        "min_activity_value", "max_activity_value", "earliest_year",
    ]

    bronze_df = pd.DataFrame(bronze_rows, columns=cols)
    bronze_df["inconclusive_reason"] = "single_assay_bronze"

    borderline_df = pd.DataFrame(borderline_rows, columns=cols)
    borderline_df["inconclusive_reason"] = "borderline_threshold"

    df = pd.concat([bronze_df, borderline_df], ignore_index=True)

    # Add names
    df["compound_name"] = df["compound_id"].map(names)
    named = df[df["compound_name"].notna()].copy()
    print(f" Inconclusive: {len(named)}/{len(df)} have names")

    # If not enough named, use unnamed with SMILES
    # (truncated SMILES stands in as a display name of last resort)
    if len(named) < n:
        unnamed = df[df["compound_name"].isna()].copy()
        unnamed["compound_name"] = unnamed["smiles"].str[:50] + "..."
        named = pd.concat([named, unnamed])

    # Add target info
    named["gene_symbol"] = named["target_id"].map(
        lambda tid: target_info.get(tid, {}).get("gene_symbol")
    )
    named["target_family"] = named["target_id"].map(
        lambda tid: target_info.get(tid, {}).get("family")
    )
    named["target_uniprot"] = named["target_id"].map(
        lambda tid: target_info.get(tid, {}).get("uniprot")
    )

    # Difficulty: bronze = medium (single-assay, ambiguous evidence),
    # borderline threshold = hard (near 10 µM cutoff, requires nuanced judgment)
    # No "easy" tier: inconclusive cases inherently require careful interpretation
    named["difficulty"] = named["inconclusive_reason"].map(
        {"single_assay_bronze": "medium", "borderline_threshold": "hard"}
    )

    # Sample: 60% medium (bronze), 40% hard (borderline)
    n_medium = int(n * 0.6)
    n_hard = n - n_medium

    selected = []
    for diff, count in [("medium", n_medium), ("hard", n_hard)]:
        pool = named[named["difficulty"] == diff]
        sampled = pool.sample(min(count, len(pool)), random_state=seed)
        for _, row in sampled.iterrows():
            reason = (
                "Single Kd measurement at threshold (DAVIS kinase panel)"
                if row["inconclusive_reason"] == "single_assay_bronze"
                else "Borderline activity near 10 µM threshold, single assay"
            )
            selected.append(
                {
                    "class": "inconclusive",
                    "correct_answer": "C",
                    "difficulty": diff,
                    "compound_name": row["compound_name"],
                    "compound_smiles": row["smiles"],
                    "compound_inchikey": row["inchikey"],
                    "target_uniprot": row["target_uniprot"],
                    "target_gene": row["gene_symbol"],
                    "target_family": row["target_family"],
                    "num_assays": int(row["num_assays"]),
                    "num_sources": int(row["num_sources"]),
                    "activity_description": reason,
                    "pchembl_value": (
                        float(row["median_pchembl"])
                        if pd.notna(row["median_pchembl"])
                        else None
                    ),
                    "evidence_quality": (
                        "bronze"
                        if row["inconclusive_reason"] == "single_assay_bronze"
                        else "silver"
                    ),
                    "source_db": (
                        "DAVIS"
                        if row["inconclusive_reason"] == "single_assay_bronze"
                        else "NegBioDB"
                    ),
                }
            )

    # Shuffle so the two evidence types are interleaved, then trim to n.
    random.Random(seed).shuffle(selected)
    return selected[:n]
|
| 497 |
+
|
| 498 |
+
|
| 499 |
+
# ── Class D: Conditional ─────────────────────────────────────────────────────
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
def select_conditional(
    m1_path: Path,
    positives_df: pd.DataFrame,  # NOTE(review): accepted but never used in this body — confirm caller contract before removing
    names: dict,
    ik_to_cid: dict,
    target_info: dict,
    n: int,
    seed: int,
) -> list[dict]:
    """Select n conditional (cross-target selectivity) pairs.

    These are compounds active against some targets but inactive against others.

    Args:
        m1_path: Parquet file with per-pair rows (smiles, inchikey, uniprot_id, Y).
        positives_df: ChEMBL positives table (currently unused here).
        names: compound_id -> preferred name map.
        ik_to_cid: InChIKey -> compound_id map.
        target_info: target_id -> info dict (keys include 'uniprot', 'gene_symbol', 'family').
        n: number of conditional records to return.
        seed: RNG seed for reproducible shuffling/choice.

    Returns:
        Up to n record dicts with class="conditional" and correct_answer="D".
    """
    m1 = pd.read_parquet(m1_path, columns=["smiles", "inchikey", "uniprot_id", "Y"])

    # Find compounds that appear as both active and inactive
    # (Y is assumed binary 0/1, so sum == count of actives — TODO confirm upstream encoding)
    compound_labels = m1.groupby("inchikey")["Y"].agg(["sum", "count"])
    compound_labels.columns = ["n_active", "n_total"]
    compound_labels["n_inactive"] = (
        compound_labels["n_total"] - compound_labels["n_active"]
    )

    # Cross-target selectivity: active in ≥1, inactive in ≥1
    cross_target = compound_labels[
        (compound_labels["n_active"] >= 1) & (compound_labels["n_inactive"] >= 1)
    ].index.tolist()

    print(f" Conditional: {len(cross_target)} cross-target selectivity compounds")

    # Map to compound_ids for name lookup
    cross_iks = set(cross_target)
    m1_cross = m1[m1["inchikey"].isin(cross_iks)].copy()
    m1_cross["compound_id"] = m1_cross["inchikey"].map(ik_to_cid)
    m1_cross["compound_name"] = m1_cross["compound_id"].map(names)

    # Filter to named compounds
    named_iks = set(
        m1_cross.loc[m1_cross["compound_name"].notna(), "inchikey"].unique()
    )
    print(f" Conditional: {len(named_iks)} with names")

    # For each named cross-target compound, get its active and inactive targets
    selected = []
    rng = random.Random(seed)
    # NOTE(review): set iteration order varies by hash seed; the shuffle below
    # re-randomizes with a fixed seed, but the pre-shuffle order is not stable.
    shuffled_iks = list(named_iks)
    rng.shuffle(shuffled_iks)

    for ik in shuffled_iks:
        if len(selected) >= n:
            break

        # NOTE(review): full-frame boolean scan per compound — O(rows) per
        # iteration; a groupby("inchikey") lookup would avoid rescanning.
        comp_data = m1_cross[m1_cross["inchikey"] == ik]
        active_targets = comp_data[comp_data["Y"] == 1]["uniprot_id"].tolist()
        inactive_targets = comp_data[comp_data["Y"] == 0]["uniprot_id"].tolist()

        if not active_targets or not inactive_targets:
            continue

        compound_name = comp_data["compound_name"].iloc[0]
        smiles = comp_data["smiles"].iloc[0]
        compound_id = comp_data["compound_id"].iloc[0]  # NOTE(review): assigned but unused below

        # Pick one inactive target for the question
        inactive_t = rng.choice(inactive_targets)
        # Get active target names for context
        active_genes = []
        for at in active_targets[:3]:  # Show up to 3 active targets
            info = _find_target_by_uniprot(target_info, at)
            if info and info.get("gene_symbol"):
                active_genes.append(info["gene_symbol"])

        inactive_info = _find_target_by_uniprot(target_info, inactive_t)

        # Difficulty based on number of targets
        if len(active_targets) >= 5 and len(inactive_targets) >= 3:
            difficulty = "hard"
        elif len(active_targets) >= 2:
            difficulty = "medium"
        else:
            difficulty = "hard"  # few targets = harder to reason about

        active_context = (
            f"Known active against: {', '.join(active_genes)}"
            if active_genes
            else f"Active against {len(active_targets)} other target(s)"
        )

        selected.append(
            {
                "class": "conditional",
                "correct_answer": "D",
                "difficulty": difficulty,
                "compound_name": compound_name,
                "compound_smiles": smiles,
                "compound_inchikey": ik,
                "target_uniprot": inactive_t,
                "target_gene": (
                    inactive_info["gene_symbol"] if inactive_info else None
                ),
                "target_family": (
                    inactive_info["family"] if inactive_info else None
                ),
                "num_active_targets": len(active_targets),
                "num_inactive_targets": len(inactive_targets),
                "active_context": active_context,
                "evidence_quality": "silver",
                "source_db": "NegBioDB+ChEMBL",
            }
        )

    rng.shuffle(selected)
    return selected[:n]
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
def _find_target_by_uniprot(target_info: dict, uniprot: str) -> dict | None:
|
| 617 |
+
"""Find target info by UniProt accession."""
|
| 618 |
+
for tid, info in target_info.items():
|
| 619 |
+
if info["uniprot"] == uniprot:
|
| 620 |
+
return info
|
| 621 |
+
return None
|
| 622 |
+
|
| 623 |
+
|
| 624 |
+
# ── Assembly ─────────────────────────────────────────────────────────────────
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
def add_target_info_to_active(records: list[dict], target_info: dict):
    """Annotate each record in place with the target's gene symbol and family.

    Records whose UniProt accession has no entry in target_info get
    explicit None values for both fields.
    """
    for record in records:
        found = _find_target_by_uniprot(target_info, record["target_uniprot"])
        source = found if found else {}
        record["target_gene"] = source.get("gene_symbol")
        record["target_family"] = source.get("family")
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
def generate_context_text(record: dict) -> str:
    """Render a benchmark record into the MCQ prompt shown to the model.

    The prompt deliberately exposes the measured activity data (pChEMBL,
    assay/source counts, activity descriptions): L1 is an assay-data
    interpretation test, so the model must reason over the evidence, not
    guess outcomes from compound/target names alone.
    """
    compound = record.get("compound_name", "Unknown")
    smi = record.get("compound_smiles", "")
    gene_symbol = record.get("target_gene")
    accession = record.get("target_uniprot", "Unknown")
    fam = record.get("target_family") or "protein"

    # Target display: "EGFR (P00533), kinase", or "P00533, protein" without a gene
    if gene_symbol:
        target_line = f"{gene_symbol} ({accession}), {fam}"
    else:
        target_line = f"{accession}, {fam}"

    parts = [
        f"Compound: {compound}",
        f"SMILES: {smi}",
        f"Target: {target_line}",
    ]

    record_class = record["class"]
    if record_class == "active":
        parts.append(
            "Activity: {} = {:.1f} nM (pChEMBL {:.2f})".format(
                record.get("activity_type", "IC50"),
                record.get("activity_value_nm", 0),
                record.get("pchembl_value", 0),
            )
        )
    elif record_class == "inactive":
        parts.append(f"Result: {record.get('activity_description', 'Inactive')}")
        parts.append(
            f"Evidence: {record.get('num_assays', 1)} assay(s), "
            f"{record.get('num_sources', 1)} source(s)"
        )
    elif record_class == "inconclusive":
        parts.append(f"Result: {record.get('activity_description', 'Inconclusive')}")
        parts.append(f"Evidence: {record.get('num_assays', 1)} assay, single source")
    elif record_class == "conditional":
        parts.append(f"Context: {record.get('active_context', '')}")
        parts.append("Tested against this target: no significant activity at 10 µM")

    parts.append("")
    parts.append("What is the most likely interaction outcome for this compound-target pair?")
    parts.append("A) Active B) Inactive C) Inconclusive D) Conditionally active")

    return "\n".join(parts)
|
| 692 |
+
|
| 693 |
+
|
| 694 |
+
def split_dataset(records: list[dict], seed: int) -> list[dict]:
    """Assign split: 200 fewshot (50/class), 200 val (50/class), rest test.

    Within each class the records are shuffled, then the first 50 become
    few-shot examples, the next 50 validation, and the remainder test.
    The combined list is shuffled once more before returning. Mutates the
    record dicts in place (adds a "split" key).
    """
    rng = random.Random(seed)

    grouped: dict = {}
    for record in records:
        grouped.setdefault(record["class"], []).append(record)

    # Shuffle each class bucket first so the split assignment is random.
    for bucket in grouped.values():
        rng.shuffle(bucket)

    for bucket in grouped.values():
        for rank, record in enumerate(bucket):
            record["split"] = (
                "fewshot" if rank < 50 else "val" if rank < 100 else "test"
            )

    merged: list = []
    for bucket in grouped.values():
        merged.extend(bucket)

    rng.shuffle(merged)
    return merged
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
def main():
    """Build the L1 MCQ dataset: select four answer classes, dedup, split, save.

    Pipeline: load name/target/id maps -> select Active/Inactive/
    Inconclusive/Conditional pools -> cross-class dedup + per-class
    compound cap -> render prompts -> assign splits -> write JSONL.
    Relies on module-level constants (NEGBIODB_PATH, POSITIVES_PATH,
    M1_PATH, N_* counts, MAX_PER_COMPOUND, OUTPUT_PATH) defined earlier
    in this file.
    """
    parser = argparse.ArgumentParser(description="Build L1 MCQ dataset")
    parser.add_argument("--seed", type=int, default=42)
    args = parser.parse_args()
    seed = args.seed

    print("Loading data sources...")
    names = load_compound_names()
    print(f" Compound names: {len(names)}")

    target_info = load_target_info(NEGBIODB_PATH)
    print(f" Targets: {len(target_info)}")

    ik_to_cid, chembl_to_cid = load_compound_ids(NEGBIODB_PATH)
    print(f" InChIKey map: {len(ik_to_cid)}")

    positives = pd.read_parquet(POSITIVES_PATH)
    print(f" ChEMBL positives: {len(positives)}")

    # ── Select each class ──
    print("\nSelecting Active (A)...")
    active = select_active(positives, names, ik_to_cid, N_ACTIVE, seed)
    # Active records come from the positives table, which lacks gene/family;
    # backfill them from target_info.
    add_target_info_to_active(active, target_info)
    print(f" Selected: {len(active)}")

    print("\nSelecting Inactive (B)...")
    inactive = select_inactive(NEGBIODB_PATH, names, target_info, N_INACTIVE, seed)
    print(f" Selected: {len(inactive)}")

    print("\nSelecting Inconclusive (C)...")
    inconclusive = select_inconclusive(
        NEGBIODB_PATH, names, target_info, N_INCONCLUSIVE, seed
    )
    print(f" Selected: {len(inconclusive)}")

    print("\nSelecting Conditional (D)...")
    conditional = select_conditional(
        M1_PATH, positives, names, ik_to_cid, target_info, N_CONDITIONAL, seed
    )
    print(f" Selected: {len(conditional)}")

    # ── C-2: Cross-class dedup + L-7: Per-class compound repetition cap ──
    # Remove compound-target pairs that appear in multiple classes.
    # Priority: active > inactive > inconclusive > conditional.
    # Compound cap is per-class (not global) because the same compound
    # appearing as active against target A and inconclusive against target B
    # is scientifically valid (selectivity) and IS what L1 tests.
    used_pairs = set()  # (inchikey[:14], uniprot) pairs already used

    def _dedup_class(records, class_name):
        """Filter records removing cross-class pair conflicts and applying per-class compound cap."""
        kept = []
        removed_pair = 0
        removed_cap = 0
        class_compound_counts = {}  # per-class compound cap
        for rec in records:
            ik = rec.get("compound_inchikey", "")
            uni = rec.get("target_uniprot", "")
            # First 14 chars of an InChIKey = connectivity hash, so
            # stereoisomers/salts of the same scaffold collapse together.
            ik14 = ik[:14] if ik else ""
            pair = (ik14, uni)

            if pair in used_pairs:
                removed_pair += 1
                continue
            if ik14 and class_compound_counts.get(ik14, 0) >= MAX_PER_COMPOUND:
                removed_cap += 1
                continue

            kept.append(rec)
            used_pairs.add(pair)
            class_compound_counts[ik14] = class_compound_counts.get(ik14, 0) + 1

        if removed_pair or removed_cap:
            print(f" {class_name}: removed {removed_pair} pair conflicts, "
                  f"{removed_cap} compound cap violations")
        return kept

    # Call order implements the class priority: earlier classes claim pairs first.
    active = _dedup_class(active, "Active")
    inactive = _dedup_class(inactive, "Inactive")
    inconclusive = _dedup_class(inconclusive, "Inconclusive")
    conditional = _dedup_class(conditional, "Conditional")

    # ── Assemble ──
    all_records = active + inactive + inconclusive + conditional
    total = len(all_records)
    print(f"\nTotal records: {total} (after dedup)")

    # Generate context text
    for rec in all_records:
        rec["context_text"] = generate_context_text(rec)

    # Assign splits
    all_records = split_dataset(all_records, seed)

    # Add question IDs (assigned after the final shuffle, so IDs are split-agnostic)
    for i, rec in enumerate(all_records):
        rec["question_id"] = f"L1-{i:04d}"

    # ── Summary ──
    print("\n=== Dataset Summary ===")
    class_counts = {}
    diff_counts = {}
    split_counts = {}
    for rec in all_records:
        class_counts[rec["class"]] = class_counts.get(rec["class"], 0) + 1
        diff_counts[rec["difficulty"]] = diff_counts.get(rec["difficulty"], 0) + 1
        split_counts[rec["split"]] = split_counts.get(rec["split"], 0) + 1

    print(f"Classes: {class_counts}")
    print(f"Difficulty: {diff_counts}")
    print(f"Splits: {split_counts}")

    # ── Save ──
    OUTPUT_PATH.parent.mkdir(parents=True, exist_ok=True)
    with open(OUTPUT_PATH, "w") as f:
        for rec in all_records:
            f.write(json.dumps(rec, ensure_ascii=False) + "\n")

    print(f"\nSaved to {OUTPUT_PATH}")
    print(f" {total} questions")
|
| 842 |
+
|
| 843 |
+
|
| 844 |
+
if __name__ == "__main__":
|
| 845 |
+
main()
|
scripts/build_l2_dataset.py
ADDED
|
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Build L2 Abstract Annotation dataset for LLM benchmark.
|
| 3 |
+
|
| 4 |
+
Step 1 (automated): Search PubMed for abstracts reporting negative DTI results.
|
| 5 |
+
Step 2 (manual): Human annotation of gold-standard structured extraction.
|
| 6 |
+
|
| 7 |
+
This script handles Step 1: abstract retrieval and candidate selection.
|
| 8 |
+
|
| 9 |
+
Search strategy:
|
| 10 |
+
- PubMed E-utilities with queries for negative DTI reporting
|
| 11 |
+
- Stratify: 40 explicit / 30 hedged / 30 implicit negative results
|
| 12 |
+
- Output: candidate abstracts for human review
|
| 13 |
+
|
| 14 |
+
Output: exports/llm_benchmarks/l2_candidates.jsonl
|
| 15 |
+
(Gold file created manually: l2_gold.jsonl)
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
import argparse
|
| 19 |
+
import json
|
| 20 |
+
import time
|
| 21 |
+
import urllib.request
|
| 22 |
+
import urllib.error
|
| 23 |
+
import xml.etree.ElementTree as ET
|
| 24 |
+
from pathlib import Path
|
| 25 |
+
|
| 26 |
+
PROJECT_ROOT = Path(__file__).resolve().parent.parent
|
| 27 |
+
OUTPUT_PATH = PROJECT_ROOT / "exports" / "llm_benchmarks" / "l2_candidates.jsonl"
|
| 28 |
+
|
| 29 |
+
PUBMED_BASE = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils"
|
| 30 |
+
DELAY = 0.4 # seconds between API calls (NCBI recommends max 3/sec without key)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def search_pubmed(query: str, retmax: int = 100) -> list[str]:
    """Search PubMed via the ESearch endpoint and return a list of PMIDs.

    Args:
        query: PubMed query string (boolean operators, field tags allowed).
        retmax: maximum number of PMIDs to request.

    Returns:
        List of PMID strings, or an empty list on any request/parse error
        (errors are printed, not raised — this is a best-effort batch script).
    """
    # Fix: use the documented urllib.parse.quote. The original called
    # urllib.request.quote, which only works because urllib.request happens
    # to re-export quote from urllib.parse — an undocumented implementation
    # detail, not public API.
    from urllib.parse import quote

    params = (
        f"db=pubmed&term={quote(query)}"
        f"&retmax={retmax}&retmode=json&sort=relevance"
    )
    url = f"{PUBMED_BASE}/esearch.fcgi?{params}"

    try:
        with urllib.request.urlopen(url, timeout=30) as resp:
            data = json.loads(resp.read())
            return data.get("esearchresult", {}).get("idlist", [])
    except Exception as e:
        # Deliberately broad: any network/JSON failure degrades to "no results".
        print(f" Search error: {e}")
        return []
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def fetch_abstracts(pmids: list[str]) -> list[dict]:
    """Fetch abstract text for list of PMIDs using efetch.

    Returns one dict per article that has a usable abstract, with keys:
    pmid, title, abstract_text, year. Articles without a PMID or Abstract
    element, or with abstracts of <= 100 characters, are skipped. Failed
    batches are logged and skipped (best-effort).
    """
    if not pmids:
        return []

    # Batch fetch: 100 PMIDs per request (E-utilities allows up to 200,
    # but 100 keeps URLs comfortably short).
    results = []
    for i in range(0, len(pmids), 100):
        batch = pmids[i : i + 100]
        ids = ",".join(batch)
        url = f"{PUBMED_BASE}/efetch.fcgi?db=pubmed&id={ids}&rettype=xml"

        try:
            with urllib.request.urlopen(url, timeout=60) as resp:
                xml_text = resp.read()
        except Exception as e:
            print(f" Fetch error for batch {i//100}: {e}")
            continue

        # NOTE(review): ET.fromstring raises ParseError on malformed XML and
        # would abort the whole run — consider moving it inside the try.
        root = ET.fromstring(xml_text)
        for article in root.findall(".//PubmedArticle"):
            pmid_el = article.find(".//PMID")
            title_el = article.find(".//ArticleTitle")
            abstract_el = article.find(".//Abstract")

            if pmid_el is None or abstract_el is None:
                continue

            pmid = pmid_el.text
            # NOTE(review): .text drops text after nested markup (e.g. <i>)
            # inside ArticleTitle; itertext() (as used below) would keep it.
            title = title_el.text if title_el is not None else ""

            # Concatenate all AbstractText elements, prefixing structured
            # section labels (BACKGROUND:, METHODS:, ...) when present.
            abstract_parts = []
            for at in abstract_el.findall("AbstractText"):
                label = at.get("Label", "")
                text = "".join(at.itertext()).strip()
                if label:
                    abstract_parts.append(f"{label}: {text}")
                else:
                    abstract_parts.append(text)
            abstract_text = " ".join(abstract_parts)

            # Get year (first PubDate/Year anywhere under the article)
            year_el = article.find(".//PubDate/Year")
            year = int(year_el.text) if year_el is not None else None

            # Minimum-length filter: drop stub abstracts.
            if abstract_text and len(abstract_text) > 100:
                results.append(
                    {
                        "pmid": pmid,
                        "title": title,
                        "abstract_text": abstract_text,
                        "year": year,
                    }
                )

        # Rate-limit between batches per NCBI guidance.
        time.sleep(DELAY)

    return results
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
# Negative DTI reporting search queries by category.
# Each entry is a single PubMed query built via implicit string-literal
# concatenation (adjacent '...' '...' pieces join into one string).
# NNNN:NNNN[dp] restricts by publication-date range.
QUERIES = {
    "explicit": [
        # Explicit statements of inactivity
        '("did not inhibit" OR "no inhibition" OR "showed no activity") AND '
        '("drug target" OR "IC50" OR "binding assay") AND '
        '("selectivity" OR "specificity") AND 2020:2025[dp]',
        # HTS negative results
        '("inactive" OR "no effect") AND ("high-throughput screening" OR "HTS") '
        'AND ("kinase" OR "protease" OR "GPCR") AND 2018:2025[dp]',
    ],
    "hedged": [
        # Hedged/qualified negative results
        '("weak activity" OR "marginal" OR "modest inhibition") AND '
        '("IC50" OR "Ki" OR "Kd") AND ("selectivity panel" OR "kinase panel") '
        'AND 2019:2025[dp]',
        # Borderline results
        '("borderline" OR "insufficient" OR "below threshold") AND '
        '("drug discovery" OR "medicinal chemistry") AND '
        '("IC50 >" OR "Ki >") AND 2018:2025[dp]',
    ],
    "implicit": [
        # Implicit negatives (selectivity studies where some targets are inactive)
        '("selectivity profile" OR "kinome scan" OR "selectivity panel") AND '
        '("selective for" OR "selective inhibitor") AND '
        '("drug target interaction" OR "kinase inhibitor") AND 2019:2025[dp]',
        # SAR studies with inactive analogues
        '("structure-activity relationship" OR "SAR") AND '
        '("inactive analogue" OR "loss of activity" OR "no binding") '
        'AND 2018:2025[dp]',
    ],
}
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def main():
    """Retrieve L2 candidate abstracts from PubMed and write them to JSONL.

    Runs each category's queries, dedups by PMID, over-selects per
    category, and saves candidates for subsequent human annotation.
    """
    parser = argparse.ArgumentParser(description="Search PubMed for L2 candidates")
    parser.add_argument(
        "--per-query", type=int, default=80, help="Max PMIDs per query"
    )
    args = parser.parse_args()

    all_abstracts = {}  # pmid -> record (dedup)

    for category, queries in QUERIES.items():
        print(f"\n=== Category: {category} ===")
        for q in queries:
            print(f" Query: {q[:80]}...")
            pmids = search_pubmed(q, retmax=args.per_query)
            print(f" Found: {len(pmids)} PMIDs")

            if pmids:
                abstracts = fetch_abstracts(pmids)
                for a in abstracts:
                    # NOTE(review): a PMID matched by several categories keeps
                    # only the LAST category seen (dict overwrite).
                    a["search_category"] = category
                    all_abstracts[a["pmid"]] = a
                print(f" Fetched: {len(abstracts)} abstracts with text")

            time.sleep(DELAY)

    # Stratify: over-select per category — explicit 50 / hedged 40 / implicit 40
    # (the module docstring's 40/30/30 is the post-annotation target; extras
    # give the human annotators room to reject).
    by_cat = {}
    for rec in all_abstracts.values():
        by_cat.setdefault(rec["search_category"], []).append(rec)

    targets = {"explicit": 50, "hedged": 40, "implicit": 40}
    selected = []
    for cat, recs in by_cat.items():
        n = targets.get(cat, 30)
        selected.extend(recs[:n])

    print(f"\n=== Summary ===")
    print(f"Total unique abstracts: {len(all_abstracts)}")
    from collections import Counter

    cat_counts = Counter(r["search_category"] for r in selected)
    print(f"Selected: {len(selected)} ({dict(cat_counts)})")

    # Save candidates
    OUTPUT_PATH.parent.mkdir(parents=True, exist_ok=True)
    with open(OUTPUT_PATH, "w") as f:
        for i, rec in enumerate(selected):
            rec["candidate_id"] = f"L2-C{i:04d}"
            f.write(json.dumps(rec, ensure_ascii=False) + "\n")

    print(f"\nSaved to {OUTPUT_PATH}")
    print(f"\nNext step: Human review + annotation → l2_gold.jsonl")
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
if __name__ == "__main__":
|
| 200 |
+
main()
|
scripts/build_l3_dataset.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Build L3 Reasoning Pilot dataset for LLM benchmark.
|
| 3 |
+
|
| 4 |
+
Generates 50 well-known inactive DTI pairs with reasoning rubrics.
|
| 5 |
+
Target diversity: kinases 20, GPCRs 10, proteases 10, other 10
|
| 6 |
+
|
| 7 |
+
The LLM must explain WHY the compound is inactive against the target.
|
| 8 |
+
Evaluation: LLM-as-Judge with 4-dimension rubric.
|
| 9 |
+
|
| 10 |
+
Split: 5 few-shot + 5 val + 40 test
|
| 11 |
+
|
| 12 |
+
Output: exports/llm_benchmarks/l3_reasoning_pilot.jsonl
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import argparse
|
| 16 |
+
import json
|
| 17 |
+
import random
|
| 18 |
+
import sqlite3
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
|
| 21 |
+
import pandas as pd
|
| 22 |
+
|
| 23 |
+
PROJECT_ROOT = Path(__file__).resolve().parent.parent
|
| 24 |
+
NEGBIODB_PATH = PROJECT_ROOT / "data" / "negbiodb.db"
|
| 25 |
+
NAMES_PATH = PROJECT_ROOT / "exports" / "compound_names.parquet"
|
| 26 |
+
OUTPUT_PATH = PROJECT_ROOT / "exports" / "llm_benchmarks" / "l3_reasoning_pilot.jsonl"
|
| 27 |
+
|
| 28 |
+
# Family allocation
|
| 29 |
+
FAMILY_ALLOCATION = {
|
| 30 |
+
"kinase": 20,
|
| 31 |
+
"GPCR": 10,
|
| 32 |
+
"protease": 10,
|
| 33 |
+
"other": 10,
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def load_compound_names() -> dict:
    """Load the compound_id -> preferred-name map from the names parquet.

    Rows with a null pref_name are dropped. Uses a vectorized boolean
    mask plus list extraction instead of the original row-wise
    ``iterrows`` loop, which is dramatically slower on large tables and
    yields dtype-coerced rows.

    Returns:
        Dict mapping int compound_id to its preferred name string.
    """
    df = pd.read_parquet(NAMES_PATH)
    named = df[df["pref_name"].notna()]
    # .tolist() yields native Python ints/strs, matching the original
    # int(row[...]) keys exactly.
    return dict(
        zip(named["compound_id"].astype(int).tolist(), named["pref_name"].tolist())
    )
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def select_reasoning_pairs(
    db_path: Path, names: dict, seed: int
) -> list[dict]:
    """Select 50 well-known inactive DTI pairs with target diversity.

    Prioritizes: multi-assay evidence, named compounds, diverse targets.

    Args:
        db_path: SQLite NegBioDB database file.
        names: compound_id -> preferred name map.
        seed: RNG seed for pandas sampling and the final shuffle.

    Returns:
        Up to 50 record dicts (class="reasoning"), family-stratified per
        FAMILY_ALLOCATION, backfilled from any family when a bucket is short.
    """
    conn = sqlite3.connect(str(db_path))
    rng = random.Random(seed)

    # Select high-evidence pairs (multi-assay, named compounds).
    # NOTE(review): ORDER BY RANDOM() is unseeded — the 5000-row candidate
    # pool differs between runs even with a fixed --seed.
    rows = conn.execute(
        """
        SELECT ctp.compound_id, ctp.target_id,
               c.canonical_smiles, c.inchikey,
               t.uniprot_accession, t.gene_symbol, t.target_family,
               ctp.num_assays, ctp.num_sources, ctp.earliest_year,
               ctp.median_pchembl
        FROM compound_target_pairs ctp
        JOIN compounds c ON ctp.compound_id = c.compound_id
        JOIN targets t ON ctp.target_id = t.target_id
        WHERE ctp.best_confidence = 'silver'
          AND c.chembl_id IS NOT NULL
          AND ctp.num_assays >= 2
        ORDER BY RANDOM()
        LIMIT 5000
        """,
    ).fetchall()
    conn.close()

    cols = [
        "compound_id", "target_id", "smiles", "inchikey",
        "uniprot", "gene_symbol", "family", "num_assays",
        "num_sources", "earliest_year", "median_pchembl",
    ]
    df = pd.DataFrame(rows, columns=cols)
    df["compound_name"] = df["compound_id"].map(names)
    named = df[df["compound_name"].notna()].copy()

    # L-5: Prefer targets with gene symbols for interpretability
    named = named.sort_values("gene_symbol", na_position="last")
    print(f" Named high-evidence pairs: {len(named)}")
    print(f" With gene symbol: {named['gene_symbol'].notna().sum()}")

    # M-2: Use FAMILY_ALLOCATION for family-stratified sampling
    # Classify into allocation buckets
    def classify_family(fam):
        # Maps free-text target_family values onto the four allocation buckets.
        if fam and fam.lower() == "kinase":
            return "kinase"
        if fam and fam.lower() in ("gpcr", "g protein-coupled receptor"):
            return "GPCR"
        if fam and fam.lower() in ("protease", "peptidase"):
            return "protease"
        return "other"

    named["family_bucket"] = named["family"].apply(classify_family)

    # One pair per target, stratified by family
    unique_targets = named.drop_duplicates("target_id")
    all_selected = []
    for bucket, n_target in FAMILY_ALLOCATION.items():
        pool = unique_targets[unique_targets["family_bucket"] == bucket]
        # Prefer targets with gene symbols
        pool = pool.sort_values("gene_symbol", na_position="last")
        n_sample = min(n_target, len(pool))
        # Sample only from the first 3x candidates so gene-symbol-bearing
        # targets (sorted first) dominate while keeping some randomness.
        sampled = pool.head(n_sample * 3).sample(
            min(n_sample, len(pool)), random_state=seed
        )
        for _, row in sampled.iterrows():
            all_selected.append(
                {
                    "class": "reasoning",
                    "compound_name": row["compound_name"],
                    "compound_smiles": row["smiles"],
                    "compound_inchikey": row["inchikey"],
                    "target_uniprot": row["uniprot"],
                    "target_gene": row["gene_symbol"],
                    "target_family": row["family"] or "protein",
                    "family_bucket": bucket,
                    "num_assays": int(row["num_assays"]),
                    "num_sources": int(row["num_sources"]),
                    "evidence_quality": "silver",
                }
            )
        print(f" {bucket}: {n_sample}/{n_target} selected")

    # Fill remaining if any bucket was short
    remaining = 50 - len(all_selected)
    if remaining > 0:
        used_targets = {r["target_uniprot"] for r in all_selected}
        leftover = unique_targets[~unique_targets["uniprot"].isin(used_targets)]
        leftover = leftover.sort_values("gene_symbol", na_position="last")
        extra = leftover.head(remaining)
        for _, row in extra.iterrows():
            all_selected.append(
                {
                    "class": "reasoning",
                    "compound_name": row["compound_name"],
                    "compound_smiles": row["smiles"],
                    "compound_inchikey": row["inchikey"],
                    "target_uniprot": row["uniprot"],
                    "target_gene": row["gene_symbol"],
                    "target_family": row["family"] or "protein",
                    "family_bucket": classify_family(row["family"]),
                    "num_assays": int(row["num_assays"]),
                    "num_sources": int(row["num_sources"]),
                    "evidence_quality": "silver",
                }
            )
        print(f" Backfill: {len(extra)} extra pairs")

    rng.shuffle(all_selected)
    return all_selected[:50]
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def generate_context_text(record: dict) -> str:
    """Render the L3 reasoning prompt for a single benchmark record.

    Builds a fixed-format prompt that names the compound and target and
    asks the model to explain the experimentally confirmed inactivity.
    Missing fields fall back to "Unknown" / "" / "protein".
    """
    compound = record.get("compound_name", "Unknown")
    smiles = record.get("compound_smiles", "")
    gene = record.get("target_gene")
    uniprot = record.get("target_uniprot", "Unknown")
    family = record.get("target_family") or "protein"

    # Gene symbol leads when available; otherwise fall back to accession only.
    if gene:
        target = f"{gene} ({uniprot}), {family}"
    else:
        target = f"{uniprot}, {family}"

    return (
        f"Compound: {compound}\n"
        f"SMILES: {smiles}\n"
        f"Target: {target}\n"
        "\n"
        "This compound has been experimentally confirmed as INACTIVE against this target.\n"
        "\n"
        "Explain the likely molecular and pharmacological reasons for this inactivity.\n"
        "Consider: binding site compatibility, selectivity profile, structural features,\n"
        "mechanism of action, and any known SAR (structure-activity relationship) data."
    )
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def split_dataset(records: list[dict], seed: int) -> list[dict]:
    """Shuffle *records* in place and tag each with a split.

    Assignment after the deterministic shuffle: first 5 records become
    "fewshot", the next 5 "val", everything else "test".

    Args:
        records: benchmark records; each dict gains a "split" key.
        seed: RNG seed — the same seed always yields the same assignment.

    Returns:
        The same list object, shuffled and tagged.
    """
    random.Random(seed).shuffle(records)
    for idx, rec in enumerate(records):
        rec["split"] = "fewshot" if idx < 5 else ("val" if idx < 10 else "test")
    return records
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def main():
    """Build the 50-pair L3 reasoning pilot benchmark and write it as JSONL.

    Pipeline: load compound names, select family-stratified inactive pairs,
    attach the reasoning prompt to each record, assign fewshot/val/test
    splits, and serialize to OUTPUT_PATH (one JSON object per line).
    """
    parser = argparse.ArgumentParser(description="Build L3 reasoning pilot")
    parser.add_argument("--seed", type=int, default=42)
    args = parser.parse_args()

    print("Loading data sources...")
    names = load_compound_names()
    print(f" Compound names: {len(names)}")

    print("\nSelecting reasoning pairs by family...")
    # select_reasoning_pairs (defined earlier in this file) caps the
    # selection at 50 family-balanced pairs.
    records = select_reasoning_pairs(NEGBIODB_PATH, names, args.seed)
    print(f"\nTotal: {len(records)}")

    # Attach the LLM prompt before splitting so every split carries it.
    for rec in records:
        rec["context_text"] = generate_context_text(rec)

    records = split_dataset(records, args.seed)
    # Question IDs are assigned AFTER the split shuffle, so IDs follow the
    # final (shuffled) order, not the selection order.
    for i, rec in enumerate(records):
        rec["question_id"] = f"L3-{i:04d}"

    # Summary
    from collections import Counter  # local import: only needed for reporting
    families = Counter(r["target_family"] for r in records)
    splits = Counter(r["split"] for r in records)
    print(f"Families: {dict(families)}")
    print(f"Splits: {dict(splits)}")

    OUTPUT_PATH.parent.mkdir(parents=True, exist_ok=True)
    # JSONL output: one record per line, UTF-8 characters kept as-is.
    with open(OUTPUT_PATH, "w") as f:
        for rec in records:
            f.write(json.dumps(rec, ensure_ascii=False) + "\n")

    print(f"\nSaved to {OUTPUT_PATH}")


if __name__ == "__main__":
    main()
|
scripts/build_l4_dataset.py
ADDED
|
@@ -0,0 +1,446 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Build L4 Tested-vs-Untested dataset for LLM benchmark.
|
| 3 |
+
|
| 4 |
+
Generates 500 compound-target pairs:
|
| 5 |
+
250 tested pairs (from NegBioDB, confirmed inactive)
|
| 6 |
+
- 125 pre-2023 (earliest_year < 2023)
|
| 7 |
+
- 125 post-2024 (earliest_year >= 2024)
|
| 8 |
+
250 untested pairs
|
| 9 |
+
- 125 trick pairs: well-known drug × well-known target, but untested
|
| 10 |
+
- 125 drug × Tdark target: known drug × understudied target
|
| 11 |
+
|
| 12 |
+
Anti-contamination:
|
| 13 |
+
- Pre-2023 vs post-2024 accuracy comparison (>15% gap → memorization flag)
|
| 14 |
+
- Evidence citation requirement (LLM must provide assay ID / DOI)
|
| 15 |
+
- All "untested" pairs verified against NegBioDB + ChEMBL positives
|
| 16 |
+
|
| 17 |
+
Split: 50 few-shot + 50 val + 400 test
|
| 18 |
+
|
| 19 |
+
Output: exports/llm_benchmarks/l4_tested_untested.jsonl
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
import argparse
|
| 23 |
+
import json
|
| 24 |
+
import random
|
| 25 |
+
import sqlite3
|
| 26 |
+
from pathlib import Path
|
| 27 |
+
|
| 28 |
+
import pandas as pd
|
| 29 |
+
|
| 30 |
+
# Repository root (this script lives in scripts/).
PROJECT_ROOT = Path(__file__).resolve().parent.parent
# SQLite database of confirmed-inactive compound-target pairs.
NEGBIODB_PATH = PROJECT_ROOT / "data" / "negbiodb.db"
# ChEMBL positives (pChEMBL >= 6), used to verify candidate "untested" pairs
# are not actually known actives.
POSITIVES_PATH = PROJECT_ROOT / "exports" / "chembl_positives_pchembl6.parquet"
# compound_id -> preferred name lookup table.
NAMES_PATH = PROJECT_ROOT / "exports" / "compound_names.parquet"
# Destination JSONL for the generated L4 benchmark.
OUTPUT_PATH = PROJECT_ROOT / "exports" / "llm_benchmarks" / "l4_tested_untested.jsonl"
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def load_compound_names() -> dict:
    """Load the compound_id -> pref_name lookup from NAMES_PATH.

    Rows with a missing preferred name are dropped.

    Returns:
        dict mapping int compound_id to its preferred (human-readable) name.
    """
    df = pd.read_parquet(NAMES_PATH)
    # Vectorized dropna + zip replaces the original per-row iterrows() loop,
    # which is orders of magnitude slower on large name tables.
    named = df.dropna(subset=["pref_name"])
    # .tolist() materializes plain Python ints/strs as dict keys/values,
    # matching the original int(row[...]) behavior exactly.
    return dict(
        zip(named["compound_id"].astype(int).tolist(), named["pref_name"].tolist())
    )
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def load_target_info(db_path: Path) -> dict:
    """Load per-target metadata from the NegBioDB targets table.

    Args:
        db_path: path to the NegBioDB SQLite file.

    Returns:
        dict mapping target_id -> {"uniprot", "gene_symbol", "family",
        "dev_level"}.
    """
    conn = sqlite3.connect(str(db_path))
    try:
        rows = conn.execute(
            "SELECT target_id, uniprot_accession, gene_symbol, target_family, "
            "development_level FROM targets"
        ).fetchall()
    finally:
        # Close even if the query raises (missing table/column), instead of
        # leaking the connection as the previous version did.
        conn.close()
    return {
        target_id: {
            "uniprot": uniprot,
            "gene_symbol": gene,
            "family": family,
            "dev_level": dev,
        }
        for target_id, uniprot, gene, family, dev in rows
    }
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def load_tested_set(db_path: Path) -> set[tuple[str, str]]:
    """Load all tested (inchikey_connectivity, uniprot) pairs from NegBioDB.

    Used downstream to verify that candidate "untested" pairs are genuinely
    absent from the database.

    Args:
        db_path: path to the NegBioDB SQLite file.

    Returns:
        Set of (InChIKey connectivity block, UniProt accession) tuples.
    """
    conn = sqlite3.connect(str(db_path))
    try:
        rows = conn.execute(
            """
            SELECT DISTINCT c.inchikey_connectivity, t.uniprot_accession
            FROM compound_target_pairs ctp
            JOIN compounds c ON ctp.compound_id = c.compound_id
            JOIN targets t ON ctp.target_id = t.target_id
            """
        ).fetchall()
    finally:
        # Guarantee the connection is released even when the query fails,
        # instead of leaking it as the previous version did.
        conn.close()
    return {(ik_conn, uniprot) for ik_conn, uniprot in rows}
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def load_positive_set(positives_path: Path) -> set[tuple[str, str]]:
    """Load ChEMBL positive (inchikey, uniprot) pairs from a parquet file.

    Args:
        positives_path: parquet file with at least "inchikey" and
            "uniprot_id" columns.

    Returns:
        Set of (inchikey, uniprot_id) tuples.
    """
    df = pd.read_parquet(positives_path, columns=["inchikey", "uniprot_id"])
    # set(zip(...)) over the two columns avoids the per-row iterrows() loop,
    # which is very slow for the multi-million-row positives table.
    return set(zip(df["inchikey"], df["uniprot_id"]))
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
# ── Tested pairs ─────────────────────────────────────────────────────────────
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def select_tested_pairs(
    db_path: Path,
    names: dict,
    target_info: dict,
    n_pre: int,
    n_post: int,
    seed: int,
) -> list[dict]:
    """Select tested (confirmed-inactive) pairs with temporal stratification.

    Draws silver-confidence pairs from NegBioDB in two temporal groups
    (earliest_year < 2023 and >= 2024) for the contamination probe, keeps
    only named compounds, diversifies over targets, and samples n_pre /
    n_post records respectively.

    NOTE(review): `target_info` is accepted but never used in this body —
    presumably kept for signature symmetry with select_untested_pairs;
    confirm before removing.

    NOTE(review): the SQL uses ORDER BY RANDOM(), which SQLite does not
    seed — candidate retrieval is NOT reproducible across runs even with a
    fixed `seed`; only the pandas .sample() step below is seeded.

    Args:
        db_path: NegBioDB SQLite file.
        names: compound_id -> preferred name map (unnamed compounds dropped).
        target_info: unused (see note above).
        n_pre: number of pre-2023 pairs to select.
        n_post: number of post-2024 pairs to select.
        seed: seed for the pandas sampling and the final shuffle.

    Returns:
        Shuffled list of record dicts with class="tested".
    """
    conn = sqlite3.connect(str(db_path))

    def query_temporal(year_clause: str, limit: int) -> list:
        # Over-fetch (limit = n * 10) so that name/target filtering below
        # still leaves enough candidates.
        return conn.execute(
            f"""
            SELECT ctp.compound_id, ctp.target_id,
                   c.canonical_smiles, c.inchikey, c.inchikey_connectivity,
                   t.uniprot_accession, t.gene_symbol, t.target_family,
                   ctp.num_assays, ctp.num_sources, ctp.earliest_year,
                   ctp.best_confidence
            FROM compound_target_pairs ctp
            JOIN compounds c ON ctp.compound_id = c.compound_id
            JOIN targets t ON ctp.target_id = t.target_id
            WHERE ctp.best_confidence = 'silver'
              AND c.chembl_id IS NOT NULL
              AND {year_clause}
            ORDER BY RANDOM()
            LIMIT ?
            """,
            (limit,),
        ).fetchall()

    pre_rows = query_temporal("ctp.earliest_year < 2023", n_pre * 10)
    post_rows = query_temporal("ctp.earliest_year >= 2024", n_post * 10)
    conn.close()

    # Column names matching the SELECT order above.
    cols = [
        "compound_id", "target_id", "smiles", "inchikey", "inchikey_conn",
        "uniprot", "gene_symbol", "family", "num_assays", "num_sources",
        "earliest_year", "confidence",
    ]

    results = []
    for rows, n, temporal in [(pre_rows, n_pre, "pre_2023"), (post_rows, n_post, "post_2024")]:
        df = pd.DataFrame(rows, columns=cols)
        df["compound_name"] = df["compound_id"].map(names)
        # Only keep compounds with a human-readable name.
        named = df[df["compound_name"].notna()].copy()

        # Diversify targets, prefer those with gene symbols
        unique = named.drop_duplicates("target_id")
        with_gene = unique[unique["gene_symbol"].notna()]
        without_gene = unique[unique["gene_symbol"].isna()]
        # Prioritize targets with gene symbols
        prioritized = pd.concat([with_gene, without_gene])
        # Keep a 3x pool, then sample n from it with a fixed seed.
        sampled = prioritized.head(n * 3)
        if len(sampled) < n:
            # Backfill from remaining named rows (may repeat targets).
            extra = named[~named.index.isin(sampled.index)]
            sampled = pd.concat([sampled, extra])
        sampled = sampled.sample(min(n, len(sampled)), random_state=seed)

        for _, row in sampled.iterrows():
            results.append(
                {
                    "class": "tested",
                    "correct_answer": "tested",
                    "temporal_group": temporal,
                    "compound_name": row["compound_name"],
                    "compound_smiles": row["smiles"],
                    "compound_inchikey": row["inchikey"],
                    "target_uniprot": row["uniprot"],
                    "target_gene": row["gene_symbol"],
                    "target_family": row["family"],
                    "num_assays": int(row["num_assays"]),
                    "num_sources": int(row["num_sources"]),
                    "earliest_year": int(row["earliest_year"]),
                    "evidence_quality": row["confidence"],
                    "source_db": "NegBioDB",
                }
            )

    # Interleave the two temporal groups deterministically.
    random.Random(seed).shuffle(results)
    return results
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# ── Untested pairs ───────────────────────────────────────────────────────────
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def select_untested_pairs(
    db_path: Path,
    names: dict,
    target_info: dict,
    tested_set: set,
    positive_set: set,
    n_trick: int,
    n_tdark: int,
    seed: int,
) -> list[dict]:
    """Select untested pairs: "trick" and "tdark" negatives for L4.

    Trick pairs combine a well-known (high-degree, named) drug with a
    well-known target; tdark pairs combine a known drug with an
    understudied target (max target_degree <= 10).  Every candidate pair
    is verified absent from both `tested_set` (NegBioDB) and
    `positive_set` (ChEMBL actives) before acceptance.

    NOTE(review): `target_info` is accepted but never used here — confirm
    before removing.
    NOTE(review): the tdark loop re-iterates `named_compounds` from the
    start, so the same compounds chosen for trick pairs can also appear in
    tdark pairs — presumably intentional; verify.

    Args:
        db_path: NegBioDB SQLite file.
        names: compound_id -> preferred name map.
        target_info: unused (see note above).
        tested_set: (inchikey_connectivity, uniprot) pairs already tested.
        positive_set: (inchikey, uniprot) known-active pairs.
        n_trick: number of trick pairs to produce.
        n_tdark: number of tdark pairs to produce.
        seed: RNG seed for candidate shuffling.

    Returns:
        Shuffled list of record dicts with class="untested".
    """
    conn = sqlite3.connect(str(db_path))
    rng = random.Random(seed)

    # Get well-known compounds (high degree, named)
    well_known = conn.execute(
        """
        SELECT c.compound_id, c.canonical_smiles, c.inchikey, c.inchikey_connectivity
        FROM compounds c
        WHERE c.chembl_id IS NOT NULL
        ORDER BY (
            SELECT MAX(ctp.compound_degree)
            FROM compound_target_pairs ctp
            WHERE ctp.compound_id = c.compound_id
        ) DESC
        LIMIT 2000
        """
    ).fetchall()

    # Get well-known targets (high degree)
    well_known_targets = conn.execute(
        """
        SELECT t.target_id, t.uniprot_accession, t.gene_symbol, t.target_family,
               t.development_level
        FROM targets t
        ORDER BY (
            SELECT MAX(ctp.target_degree)
            FROM compound_target_pairs ctp
            WHERE ctp.target_id = t.target_id
        ) DESC
        LIMIT 500
        """
    ).fetchall()

    # Get understudied targets (low degree, few tested compounds)
    tdark_targets = conn.execute(
        """
        SELECT t.target_id, t.uniprot_accession, t.gene_symbol, t.target_family,
               t.development_level
        FROM targets t
        WHERE (
            SELECT MAX(ctp.target_degree) FROM compound_target_pairs ctp
            WHERE ctp.target_id = t.target_id
        ) <= 10
        """
    ).fetchall()

    conn.close()

    # Filter named compounds
    named_compounds = [
        (cid, smi, ik, ikc)
        for cid, smi, ik, ikc in well_known
        if cid in names
    ]

    print(f" Untested: {len(named_compounds)} well-known named compounds")
    print(f" Untested: {len(well_known_targets)} well-known targets")
    print(f" Untested: {len(tdark_targets)} understudied targets (degree ≤ 10)")

    # ── Trick pairs: well-known drug × well-known target, but untested ──
    trick_pairs = []
    rng.shuffle(named_compounds)
    for cid, smi, ik, ikc in named_compounds:
        if len(trick_pairs) >= n_trick:
            break
        # Reshuffle targets per compound so each compound scans a fresh
        # random window of 20 candidates.
        rng.shuffle(well_known_targets)
        for tid, uniprot, gene, family, dev in well_known_targets[:20]:
            # Check if this pair is untested (not in tested set or positive set)
            # Prefer the connectivity key; fall back to the first InChIKey
            # block (the 14-char connectivity layer) when absent.
            ik_14 = ikc if ikc else ik[:14] if ik else None
            if ik_14 and (ik_14, uniprot) not in tested_set and (ik, uniprot) not in positive_set:
                trick_pairs.append(
                    {
                        "class": "untested",
                        "correct_answer": "untested",
                        "untested_type": "trick",
                        "compound_name": names[cid],
                        "compound_smiles": smi,
                        "compound_inchikey": ik,
                        "target_uniprot": uniprot,
                        "target_gene": gene,
                        "target_family": family,
                        "target_dev_level": dev,
                        "source_db": None,
                    }
                )
                # At most one target per compound.
                break

    # ── Drug × understudied target ──
    tdark_pairs = []
    for cid, smi, ik, ikc in named_compounds:
        if len(tdark_pairs) >= n_tdark:
            break
        rng.shuffle(tdark_targets)
        for tid, uniprot, gene, family, dev in tdark_targets[:10]:
            ik_14 = ikc if ikc else ik[:14] if ik else None
            if ik_14 and (ik_14, uniprot) not in tested_set and (ik, uniprot) not in positive_set:
                tdark_pairs.append(
                    {
                        "class": "untested",
                        "correct_answer": "untested",
                        "untested_type": "tdark",
                        "compound_name": names[cid],
                        "compound_smiles": smi,
                        "compound_inchikey": ik,
                        "target_uniprot": uniprot,
                        "target_gene": gene,
                        "target_family": family,
                        "target_dev_level": dev,
                        "source_db": None,
                    }
                )
                break

    print(f" Trick pairs: {len(trick_pairs)}")
    print(f" Tdark pairs: {len(tdark_pairs)}")

    all_untested = trick_pairs + tdark_pairs
    rng.shuffle(all_untested)
    return all_untested
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
# ── Context generation ────────────────────────────────────────────────────────
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def generate_context_text(record: dict) -> str:
    """Render the L4 tested-vs-untested prompt for a single record.

    Produces a fixed-format question asking whether the compound-target
    pair has been experimentally tested, with an evidence-citation
    requirement.  Missing fields fall back to "Unknown" / "" / "protein".
    """
    compound = record.get("compound_name", "Unknown")
    smiles = record.get("compound_smiles", "")
    gene = record.get("target_gene")
    uniprot = record.get("target_uniprot", "Unknown")
    family = record.get("target_family") or "protein"

    # Gene symbol leads when available; otherwise accession only.
    if gene:
        target = f"{gene} ({uniprot}), {family}"
    else:
        target = f"{uniprot}, {family}"

    return (
        f"Compound: {compound}\n"
        f"SMILES: {smiles}\n"
        f"Target: {target}\n"
        "\n"
        "Has this compound-target pair been experimentally tested for interaction?\n"
        "If tested, provide the source (database, assay ID, or publication).\n"
        "\n"
        "Answer: tested / untested"
    )
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
# ── Split ─────────────────────────────────────────────────────────────────────
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def split_dataset(records: list[dict], seed: int) -> list[dict]:
    """Assign splits: 50 fewshot + 50 val + rest test, balanced by class.

    Each class ("tested" / "untested") contributes its first 25 shuffled
    records to fewshot and the next 25 to val; everything else is test.
    Records with any other "class" value raise KeyError (kept explicit).

    Args:
        records: benchmark records, each carrying a "class" key.
        seed: RNG seed; split membership is deterministic for a given seed.

    Returns:
        The same record dicts (mutated in place with a "split" key), in a
        freshly shuffled combined order.
    """
    rng = random.Random(seed)

    by_class: dict[str, list[dict]] = {"tested": [], "untested": []}
    for rec in records:
        by_class[rec["class"]].append(rec)

    # Single pass per class: shuffle then assign positional splits.
    # (Previously this used two loops, one via .items() with an unused key.)
    for cls_records in by_class.values():
        rng.shuffle(cls_records)
        for i, rec in enumerate(cls_records):
            if i < 25:
                rec["split"] = "fewshot"
            elif i < 50:
                rec["split"] = "val"
            else:
                rec["split"] = "test"

    all_records = by_class["tested"] + by_class["untested"]
    rng.shuffle(all_records)
    return all_records
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
def main():
    """Build the 500-pair L4 tested-vs-untested benchmark and write JSONL.

    Pipeline: load name/target lookups and the tested/positive verification
    sets, select 250 tested (temporally stratified) and 250 untested
    (trick + tdark) pairs, attach prompts, assign balanced splits, assign
    IDs, run a leak check, and serialize to OUTPUT_PATH.
    """
    parser = argparse.ArgumentParser(description="Build L4 tested/untested dataset")
    parser.add_argument("--seed", type=int, default=42)
    args = parser.parse_args()
    seed = args.seed

    print("Loading data sources...")
    names = load_compound_names()
    print(f" Compound names: {len(names)}")

    target_info = load_target_info(NEGBIODB_PATH)
    print(f" Targets: {len(target_info)}")

    print("Loading tested/positive sets for verification...")
    tested_set = load_tested_set(NEGBIODB_PATH)
    print(f" Tested pairs: {len(tested_set)}")

    positive_set = load_positive_set(POSITIVES_PATH)
    print(f" Positive pairs: {len(positive_set)}")

    # Select tested pairs (temporal split)
    print("\nSelecting tested pairs...")
    tested = select_tested_pairs(NEGBIODB_PATH, names, target_info, 125, 125, seed)
    print(f" Selected: {len(tested)}")
    pre_count = sum(1 for r in tested if r.get("temporal_group") == "pre_2023")
    post_count = sum(1 for r in tested if r.get("temporal_group") == "post_2024")
    print(f" Pre-2023: {pre_count}, Post-2024: {post_count}")

    # Select untested pairs
    print("\nSelecting untested pairs...")
    untested = select_untested_pairs(
        NEGBIODB_PATH, names, target_info, tested_set, positive_set, 125, 125, seed
    )
    print(f" Selected: {len(untested)}")

    # Assemble
    all_records = tested + untested
    total = len(all_records)
    print(f"\nTotal records: {total}")

    # Generate context (before splitting, so every split carries a prompt).
    for rec in all_records:
        rec["context_text"] = generate_context_text(rec)

    # Split
    all_records = split_dataset(all_records, seed)

    # Add IDs — after the split shuffle, so IDs follow final order.
    for i, rec in enumerate(all_records):
        rec["question_id"] = f"L4-{i:04d}"

    # Verify: no untested pair should be in tested_set or positive_set
    # NOTE(review): this check uses ik[:14] as the connectivity key, while
    # selection preferred the stored inchikey_connectivity column.  These
    # agree when the connectivity column equals the first InChIKey block —
    # confirm that assumption holds for this database.
    n_leaks = 0
    for rec in all_records:
        if rec["class"] == "untested":
            ik = rec.get("compound_inchikey", "")
            uni = rec.get("target_uniprot", "")
            ik14 = ik[:14] if ik else ""
            if (ik14, uni) in tested_set or (ik, uni) in positive_set:
                n_leaks += 1
    print(f"\nVerification: {n_leaks} leaked untested pairs (should be 0)")

    # Summary
    from collections import Counter  # local import: only needed for reporting
    print("\n=== Dataset Summary ===")
    print(f"Classes: {Counter(r['class'] for r in all_records)}")
    print(f"Splits: {Counter(r['split'] for r in all_records)}")
    if tested:
        print(f"Temporal: pre_2023={pre_count}, post_2024={post_count}")
    if untested:
        ut_types = Counter(r.get("untested_type") for r in all_records if r["class"] == "untested")
        print(f"Untested types: {ut_types}")

    # Save as JSONL: one record per line, UTF-8 characters kept as-is.
    OUTPUT_PATH.parent.mkdir(parents=True, exist_ok=True)
    with open(OUTPUT_PATH, "w") as f:
        for rec in all_records:
            f.write(json.dumps(rec, ensure_ascii=False) + "\n")

    print(f"\nSaved to {OUTPUT_PATH}")
    print(f" {total} pairs")


if __name__ == "__main__":
    main()
|
scripts/collect_llm_results.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""Collect and summarize LLM benchmark results into Table 2.
|
| 3 |
+
|
| 4 |
+
Reads results from results/llm/{task}_{model}_{config}_fs{set}/results.json
|
| 5 |
+
Generates Table 2: [Task × Model × Config × Metric], mean ± std across 3 few-shot sets.
|
| 6 |
+
|
| 7 |
+
Output:
|
| 8 |
+
results/llm/table2.csv
|
| 9 |
+
results/llm/table2.md
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import argparse
|
| 13 |
+
import json
|
| 14 |
+
from collections import defaultdict
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
|
| 19 |
+
# Repository root (this script lives in scripts/).
PROJECT_ROOT = Path(__file__).resolve().parent.parent
# Default location of per-run LLM result directories.
RESULTS_DIR = PROJECT_ROOT / "results" / "llm"

# Primary metrics per task.  Keys are the task prefixes used in run
# directory names (l1..l4); values are the metric names reported in
# Table 2.  "judge_"-prefixed L3 metrics are mapped back to raw result
# keys during aggregation.
PRIMARY_METRICS = {
    "l1": ["accuracy", "macro_f1", "mcc"],
    "l2": ["schema_compliance", "entity_f1", "field_accuracy"],
    "l3": ["judge_accuracy", "judge_reasoning", "judge_completeness", "judge_specificity", "overall"],
    "l4": ["accuracy", "mcc", "evidence_citation_rate", "accuracy_pre_2023", "accuracy_post_2024", "contamination_gap"],
}
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def load_all_results(results_dir: Path) -> list[dict]:
    """Load every run's results.json found under *results_dir*.

    Run directories are named ``{task}_{model}_{config}_fs{set}``.  For L3
    runs, results from a sibling ``{name}_judged`` directory (produced by
    the judge pipeline, which adds the overall score) take precedence when
    present.  ``*_judged`` directories themselves are never treated as runs.

    Args:
        results_dir: directory containing one subdirectory per run.

    Returns:
        List of dicts with run_name, task, model, config, fewshot_set,
        metrics, and meta.
    """
    loaded = []
    for run_dir in sorted(results_dir.iterdir()):
        if not run_dir.is_dir() or run_dir.name.endswith("_judged"):
            continue
        results_file = run_dir / "results.json"
        if not results_file.exists():
            continue

        # L3 judged results supersede the raw run output.
        judged_file = results_dir / f"{run_dir.name}_judged" / "results.json"
        if run_dir.name.startswith("l3_") and judged_file.exists():
            results_file = judged_file

        metrics = json.loads(results_file.read_text())
        meta_file = run_dir / "run_meta.json"
        meta = json.loads(meta_file.read_text()) if meta_file.exists() else {}

        # Run name -> ({task}_{model}_{config}, few-shot set index).
        name = run_dir.name
        parts = name.rsplit("_fs", 1)
        prefix = parts[0]
        fs_set = int(parts[1]) if len(parts) == 2 else 0

        # task is always the 2-char prefix (l1..l4); the rest is
        # {model}_{config} with a known config suffix.
        task = prefix[:2]
        rest = prefix[3:]
        for suffix, cfg in (("_zero-shot", "zero-shot"), ("_3-shot", "3-shot")):
            if rest.endswith(suffix):
                model, config = rest[: -len(suffix)], cfg
                break
        else:
            model = rest
            config = meta.get("config", "unknown")

        loaded.append(
            {
                "run_name": name,
                "task": task,
                "model": model,
                "config": config,
                "fewshot_set": fs_set,
                "metrics": metrics,
                "meta": meta,
            }
        )

    return loaded
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def aggregate_results(results: list[dict]) -> list[dict]:
    """Aggregate metrics across few-shot sets (mean ± std).

    Groups runs by (task, model, config) and reports, for every metric in
    PRIMARY_METRICS for that task, a ``{metric}_mean`` / ``{metric}_std``
    pair (population std, ddof=0; 0.0 when only one value is present,
    None when the metric is missing everywhere).

    Args:
        results: run dicts as produced by load_all_results().

    Returns:
        One aggregated row per (task, model, config), sorted by that key.
    """
    # Group by (task, model, config)
    groups = defaultdict(list)
    for r in results:
        key = (r["task"], r["model"], r["config"])
        groups[key].append(r["metrics"])

    aggregated = []
    for (task, model, config), metric_list in sorted(groups.items()):
        # C-4: Zero-shot with deterministic models produces identical results.
        # Report N=1 to avoid misleading std=0 across fake replicates.
        effective_list = metric_list
        if config == "zero-shot" and len(metric_list) > 1:
            effective_list = [metric_list[0]]

        row = {
            "task": task,
            "model": model,
            "config": config,
            "n_runs": len(effective_list),
        }

        metrics = PRIMARY_METRICS.get(task, [])
        for metric in metrics:
            # Strip "judge_" prefix to look up raw key in results.json
            raw_key = metric.removeprefix("judge_")
            values = []
            for m in effective_list:
                # Backward compat: derive contamination_gap from pre/post
                # accuracies when older result files lack the field.
                if metric == "contamination_gap" and m.get("contamination_gap") is None:
                    pre = m.get("accuracy_pre_2023")
                    post = m.get("accuracy_post_2024")
                    val = round(pre - post, 4) if pre is not None and post is not None else None
                else:
                    val = m.get(raw_key)
                # Handle nested dicts (e.g., L3 overall.mean)
                if isinstance(val, dict):
                    val = val.get("mean")
                # Only numeric values participate in the aggregate.
                if val is not None and isinstance(val, (int, float)):
                    values.append(val)

            if values:
                row[f"{metric}_mean"] = float(np.mean(values))
                # Population std; a single value reports 0.0 rather than nan.
                row[f"{metric}_std"] = (
                    float(np.std(values, ddof=0)) if len(values) > 1 else 0.0
                )
            else:
                row[f"{metric}_mean"] = None
                row[f"{metric}_std"] = None

        aggregated.append(row)

    return aggregated
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def format_table(aggregated: list[dict]) -> str:
    """Render aggregated result rows as a markdown table.

    Columns are Task / Model / Config / N followed by every metric that
    appears in any row (sorted).  Missing metrics render as an em dash;
    a positive std renders as ``mean±std``.

    Args:
        aggregated: rows produced by aggregate_results().

    Returns:
        The complete markdown table as a single string, or a placeholder
        message when there are no rows.
    """
    if not aggregated:
        return "No results found."

    # Union of metric names across all rows ("<metric>_mean" keys).
    metric_names = sorted(
        {key[:-5] for row in aggregated for key in row if key.endswith("_mean")}
    )

    header_cells = ["**Task**", "**Model**", "**Config**", "**N**"]
    header_cells.extend(f"**{m}**" for m in metric_names)
    table = ["| " + " | ".join(header_cells) + " |"]
    table.append("|" + "---|" * (4 + len(metric_names)))

    for row in aggregated:
        cells = [str(row["task"]), str(row["model"]), str(row["config"]), str(row["n_runs"])]
        for m in metric_names:
            mean = row.get(f"{m}_mean")
            std = row.get(f"{m}_std")
            if mean is None:
                cells.append("—")
            elif std and std > 0:
                cells.append(f"{mean:.3f}±{std:.3f}")
            else:
                cells.append(f"{mean:.3f}")
        table.append("| " + " | ".join(cells) + " |")

    return "\n".join(table)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def main():
    """CLI entry point: collect, aggregate, and render LLM benchmark results.

    Loads all runs under --results-dir, aggregates across few-shot sets,
    and writes table2.csv (properly quoted CSV) and table2.md, echoing the
    markdown table to stdout.
    """
    parser = argparse.ArgumentParser(description="Collect LLM results")
    parser.add_argument(
        "--results-dir", type=Path, default=RESULTS_DIR
    )
    args = parser.parse_args()

    print("Loading LLM results...")
    results = load_all_results(args.results_dir)
    print(f" Found {len(results)} runs")

    if not results:
        print("No results found.")
        return

    # List runs
    for r in results:
        print(f" {r['run_name']}: {r['task']} / {r['model']} / {r['config']}")

    print("\nAggregating across few-shot sets...")
    aggregated = aggregate_results(results)

    # Save CSV via the csv module so values containing commas or quotes are
    # escaped correctly (the previous hand-rolled join produced broken rows
    # for any field with an embedded comma).
    import csv  # local import: only needed for this output step

    # Header: union of all keys across all rows (preserving order)
    cols = []
    seen = set()
    for row in aggregated:
        for k in row:
            if k not in seen:
                cols.append(k)
                seen.add(k)

    csv_path = args.results_dir / "table2.csv"
    with open(csv_path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(cols)
        for row in aggregated:
            # str() keeps the previous cell rendering (e.g. "None" for
            # missing metrics) while gaining proper CSV quoting.
            writer.writerow([str(row.get(c, "")) for c in cols])
    print(f"Saved CSV: {csv_path}")

    # Save Markdown
    md_path = args.results_dir / "table2.md"
    table_text = format_table(aggregated)
    with open(md_path, "w") as f:
        f.write(table_text)
    print(f"Saved Markdown: {md_path}")

    # Print table
    print(f"\n{table_text}")


if __name__ == "__main__":
    main()
|