{
"@context": "https://schema.org",
"@type": "Dataset",
"name": "ALL Bench Leaderboard 2026",
"alternateName": [
"ALL Bench",
"ALLBench",
"AI Benchmark Leaderboard 2026"
],
"description": "The only AI benchmark leaderboard covering LLM, VLM, Agent, Image, Video, and Music generation in a single unified view. 91 models cross-verified across 6 modalities with confidence badges. Features composite 5-axis scoring, interactive comparison tools, and downloadable intelligence reports.",
"url": "https://huggingface.co/spaces/FINAL-Bench/all-bench-leaderboard",
"sameAs": [
"https://huggingface.co/datasets/FINAL-Bench/ALL-Bench-Leaderboard",
"https://github.com/final-bench/ALL-Bench-Leaderboard"
],
"license": "https://opensource.org/licenses/MIT",
"version": "2.2.1",
"datePublished": "2026-03-01",
"dateModified": "2026-03-10",
"creator": {
"@type": "Organization",
"name": "ALL Bench Team",
"url": "https://huggingface.co/FINAL-Bench"
},
"keywords": [
"AI benchmark",
"LLM leaderboard",
"GPT-5",
"Claude",
"Gemini",
"VLM benchmark",
"AI agent",
"image generation",
"video generation",
"music generation",
"MMLU-Pro",
"GPQA",
"ARC-AGI-2",
"FINAL Bench",
"metacognition",
"multimodal AI",
"AI evaluation",
"benchmark comparison",
"AI model ranking",
"open source AI"
],
"about": [
{
"@type": "Thing",
"name": "Large Language Model"
},
{
"@type": "Thing",
"name": "Vision Language Model"
},
{
"@type": "Thing",
"name": "AI Benchmark"
},
{
"@type": "Thing",
"name": "Generative AI"
},
{
"@type": "Thing",
"name": "Metacognition"
}
],
"measurementTechnique": "Cross-verified benchmark aggregation with 3-tier confidence system",
"variableMeasured": [
{
"@type": "PropertyValue",
"name": "MMLU-Pro",
"description": "57K expert-level multi-discipline questions"
},
{
"@type": "PropertyValue",
"name": "GPQA Diamond",
"description": "PhD-level expert questions in science"
},
{
"@type": "PropertyValue",
"name": "AIME 2025",
"description": "American Invitational Mathematics Examination"
},
{
"@type": "PropertyValue",
"name": "HLE",
"description": "Humanity's Last Exam — 2500 expert-sourced questions"
},
{
"@type": "PropertyValue",
"name": "ARC-AGI-2",
"description": "Abstract reasoning and novel pattern recognition"
},
{
"@type": "PropertyValue",
"name": "FINAL Bench Metacognitive",
"description": "AI self-correction ability measurement"
},
{
"@type": "PropertyValue",
"name": "SWE-Pro",
"description": "Software engineering benchmark by Scale AI"
},
{
"@type": "PropertyValue",
"name": "IFEval",
"description": "Instruction following evaluation"
},
{
"@type": "PropertyValue",
"name": "LiveCodeBench",
"description": "Continuously updated coding benchmark"
},
{
"@type": "PropertyValue",
"name": "Union Eval S3",
"description": "ALL Bench proprietary integrated benchmark, 100% JSON auto-graded"
}
],
"distribution": [
{
"@type": "DataDownload",
"encodingFormat": "application/json",
"contentUrl": "https://huggingface.co/datasets/FINAL-Bench/ALL-Bench-Leaderboard/resolve/main/all_bench_leaderboard_v2.1.json",
"name": "Unified JSON Dataset (75KB)"
}
],
"isPartOf": {
"@type": "DataCatalog",
"name": "Hugging Face Datasets",
"url": "https://huggingface.co/datasets"
},
"funder": {
"@type": "Organization",
"name": "FINAL Bench"
}
}