SeaWolf-AI committed on
Commit
474bc99
·
verified ·
1 Parent(s): 7a35fe9

Upload 2 files

Browse files
Files changed (2) hide show
  1. CITATION.cff +29 -0
  2. schema.jsonld +64 -0
CITATION.cff ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cff-version: 1.2.0
2
+ title: "ALL Bench Leaderboard 2026: Unified Multi-Modal AI Evaluation"
3
+ message: "If you use this dataset, please cite it as below."
4
+ type: dataset
5
+ authors:
6
+ - name: "ALL Bench Team"
7
+ url: "https://huggingface.co/spaces/FINAL-Bench/all-bench-leaderboard"
8
+ repository-code: "https://github.com/final-bench/ALL-Bench-Leaderboard"
9
+ license: MIT
10
+ version: "2.1"
11
+ date-released: "2026-03-08"
12
+ keywords:
13
+ - ai-benchmark
14
+ - llm-leaderboard
15
+ - vlm
16
+ - multimodal-ai
17
+ - metacognition
18
+ - final-bench
19
+ - gpt-5
20
+ - claude
21
+ - gemini
22
+ abstract: >-
23
+ ALL Bench Leaderboard is the only AI benchmark covering LLM, VLM, Agent,
24
+ Image, Video, and Music generation in a single unified view. It cross-verifies
25
+ 91 AI models across 6 modalities with a 3-tier confidence system. Features
26
+ composite 5-axis scoring (Knowledge, Expert Reasoning, Abstract Reasoning,
27
+ Metacognition, Execution), interactive comparison tools, and downloadable
28
+ intelligence reports. Includes FINAL Bench metacognitive evaluation where
29
+ Error Recovery explains 94.8% of self-correction performance variance.
schema.jsonld ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "@context": "https://schema.org",
3
+ "@type": "Dataset",
4
+ "name": "ALL Bench Leaderboard 2026",
5
+ "alternateName": ["ALL Bench", "ALLBench", "AI Benchmark Leaderboard 2026"],
6
+ "description": "The only AI benchmark leaderboard covering LLM, VLM, Agent, Image, Video, and Music generation in a single unified view. 91 models cross-verified across 6 modalities with confidence badges. Features composite 5-axis scoring, interactive comparison tools, and downloadable intelligence reports.",
7
+ "url": "https://huggingface.co/spaces/FINAL-Bench/all-bench-leaderboard",
8
+ "sameAs": [
9
+ "https://huggingface.co/datasets/FINAL-Bench/ALL-Bench-Leaderboard",
10
+ "https://github.com/final-bench/ALL-Bench-Leaderboard"
11
+ ],
12
+ "license": "https://opensource.org/licenses/MIT",
13
+ "version": "2.1",
14
+ "datePublished": "2026-03-01",
15
+ "dateModified": "2026-03-08",
16
+ "creator": {
17
+ "@type": "Organization",
18
+ "name": "ALL Bench Team",
19
+ "url": "https://huggingface.co/FINAL-Bench"
20
+ },
21
+ "keywords": [
22
+ "AI benchmark", "LLM leaderboard", "GPT-5", "Claude", "Gemini",
23
+ "VLM benchmark", "AI agent", "image generation", "video generation",
24
+ "music generation", "MMLU-Pro", "GPQA", "ARC-AGI-2", "FINAL Bench",
25
+ "metacognition", "multimodal AI", "AI evaluation", "benchmark comparison",
26
+ "AI model ranking", "open source AI"
27
+ ],
28
+ "about": [
29
+ {"@type": "Thing", "name": "Large Language Model"},
30
+ {"@type": "Thing", "name": "Vision Language Model"},
31
+ {"@type": "Thing", "name": "AI Benchmark"},
32
+ {"@type": "Thing", "name": "Generative AI"},
33
+ {"@type": "Thing", "name": "Metacognition"}
34
+ ],
35
+ "measurementTechnique": "Cross-verified benchmark aggregation with 3-tier confidence system",
36
+ "variableMeasured": [
37
+ {"@type": "PropertyValue", "name": "MMLU-Pro", "description": "57K expert-level multi-discipline questions"},
38
+ {"@type": "PropertyValue", "name": "GPQA Diamond", "description": "PhD-level expert questions in science"},
39
+ {"@type": "PropertyValue", "name": "AIME 2025", "description": "American Invitational Mathematics Examination"},
40
+ {"@type": "PropertyValue", "name": "HLE", "description": "Humanity's Last Exam — 2500 expert-sourced questions"},
41
+ {"@type": "PropertyValue", "name": "ARC-AGI-2", "description": "Abstract reasoning and novel pattern recognition"},
42
+ {"@type": "PropertyValue", "name": "FINAL Bench Metacognitive", "description": "AI self-correction ability measurement"},
43
+ {"@type": "PropertyValue", "name": "SWE-Pro", "description": "Software engineering benchmark by Scale AI"},
44
+ {"@type": "PropertyValue", "name": "IFEval", "description": "Instruction following evaluation"},
45
+ {"@type": "PropertyValue", "name": "LiveCodeBench", "description": "Continuously updated coding benchmark"}
46
+ ],
47
+ "distribution": [
48
+ {
49
+ "@type": "DataDownload",
50
+ "encodingFormat": "application/json",
51
+ "contentUrl": "https://huggingface.co/datasets/FINAL-Bench/ALL-Bench-Leaderboard/resolve/main/all_bench_leaderboard_v2.1.json",
52
+ "name": "Unified JSON Dataset (75KB)"
53
+ }
54
+ ],
55
+ "isPartOf": {
56
+ "@type": "DataCatalog",
57
+ "name": "Hugging Face Datasets",
58
+ "url": "https://huggingface.co/datasets"
59
+ },
60
+ "funder": {
61
+ "@type": "Organization",
62
+ "name": "FINAL Bench"
63
+ }
64
+ }