{
  "dataset_name": "sfe-scientists-first-exam",
  "pretty_name": "SFE (Scientists First Exam) Multimodal Science Benchmark",
  "name_zh": "SFE 科学家第一考试多模态评测数据集",
  "organization": "obaydata",
| "description": "SFE (Scientists First Exam) is a multimodal scientific LLM evaluation benchmark. It uses original research data combined with expert annotations to construct high-difficulty scientific reasoning datasets, evaluating models' full-chain research capabilities from signal perception and attribute understanding to complex reasoning.\n\n**Domains:** Astronomy, Earth Science, Life Science, Materials Science, and more.\n\n**Data Production:** Domain experts combine the latest research results to annotate high-difficulty multimodal image-text questions with answer explanations.\n\n**Difficulty Standard:** Data is produced to the pass@4 standard where the model answers correctly at most twice out of four attempts.",
|
| "description_zh": "SFE(Scientists First Exam)是多模态科学大模型评测基准,核心是用原始科研数据 + 专家标注构建高难度科学推理数据集,评估模型从信号感知、属性理解到复杂推理的全链条科研能力。\n\n由专家结合最新研究成果在天文学、地球科学、生命科学、材料科学等领域进行标注,生产出具备高难度的多模态图文题目并出具答案解析。生产数据按照pass@4标准模型至多答对两次。",
|
| "scale": "Weekly production capacity: 300 entries",
|
| "format": "JSON with images",
|
| "use_case": "Evaluating multimodal LLMs on expert-level scientific reasoning across multiple disciplines",
|
| "license": "cc-by-nc-4.0",
|
| "copyright": "Full copyright provided",
|
| "tags": [
|
| "science",
|
| "multimodal",
|
| "benchmark",
|
| "evaluation",
|
| "astronomy",
|
| "earth-science",
|
| "biology",
|
| "materials-science"
|
| ],
|
| "task_categories": [
|
| "visual-question-answering",
|
| "question-answering"
|
| ],
|
| "sample_link": "",
|
| "contact": "simon.su@obaydata.com",
|
| "homepage": "https://obaydata.com"
|
}