{
  "@context": "https://schema.org",
  "@type": "Dataset",
  "name": "ALL Bench Leaderboard 2026",
  "alternateName": [
    "ALL Bench",
    "ALLBench",
    "AI Benchmark Leaderboard 2026"
  ],
  "description": "The only AI benchmark leaderboard covering LLM, VLM, Agent, Image, Video, and Music generation in a single unified view. 91 models cross-verified across 6 modalities with confidence badges. Features composite 5-axis scoring, interactive comparison tools, and downloadable intelligence reports.",
  "url": "https://huggingface.co/spaces/FINAL-Bench/all-bench-leaderboard",
  "sameAs": [
    "https://huggingface.co/datasets/FINAL-Bench/ALL-Bench-Leaderboard",
    "https://github.com/final-bench/ALL-Bench-Leaderboard"
  ],
  "license": "https://opensource.org/licenses/MIT",
  "version": "2.2.1",
  "datePublished": "2026-03-01",
  "dateModified": "2026-03-10",
  "creator": {
    "@type": "Organization",
    "name": "ALL Bench Team",
    "url": "https://huggingface.co/FINAL-Bench"
  },
  "keywords": [
    "AI benchmark",
    "LLM leaderboard",
    "GPT-5",
    "Claude",
    "Gemini",
    "VLM benchmark",
    "AI agent",
    "image generation",
    "video generation",
    "music generation",
    "MMLU-Pro",
    "GPQA",
    "ARC-AGI-2",
    "FINAL Bench",
    "metacognition",
    "multimodal AI",
    "AI evaluation",
    "benchmark comparison",
    "AI model ranking",
    "open source AI"
  ],
  "about": [
    {
      "@type": "Thing",
      "name": "Large Language Model"
    },
    {
      "@type": "Thing",
      "name": "Vision Language Model"
    },
    {
      "@type": "Thing",
      "name": "AI Benchmark"
    },
    {
      "@type": "Thing",
      "name": "Generative AI"
    },
    {
      "@type": "Thing",
      "name": "Metacognition"
    }
  ],
  "measurementTechnique": "Cross-verified benchmark aggregation with 3-tier confidence system",
  "variableMeasured": [
    {
      "@type": "PropertyValue",
| "name": "MMLU-Pro", |
| "description": "57K expert-level multi-discipline questions" |
| }, |
    {
      "@type": "PropertyValue",
      "name": "GPQA Diamond",
      "description": "PhD-level expert questions in science"
    },
    {
      "@type": "PropertyValue",
      "name": "AIME 2025",
      "description": "American Invitational Mathematics Examination"
    },
    {
      "@type": "PropertyValue",
      "name": "HLE",
      "description": "Humanity's Last Exam: 2,500 expert-sourced questions"
    },
    {
      "@type": "PropertyValue",
      "name": "ARC-AGI-2",
      "description": "Abstract reasoning and novel pattern recognition"
    },
    {
      "@type": "PropertyValue",
      "name": "FINAL Bench Metacognitive",
      "description": "AI self-correction ability measurement"
    },
    {
      "@type": "PropertyValue",
      "name": "SWE-Pro",
      "description": "Software engineering benchmark by Scale AI"
    },
    {
      "@type": "PropertyValue",
      "name": "IFEval",
      "description": "Instruction following evaluation"
    },
    {
      "@type": "PropertyValue",
      "name": "LiveCodeBench",
      "description": "Continuously updated coding benchmark"
    },
    {
      "@type": "PropertyValue",
| "name": "Union Eval S3", |
| "description": "ALL Bench proprietary integrated benchmark, 100% JSON auto-graded" |
| } |
  ],
  "distribution": [
    {
      "@type": "DataDownload",
      "encodingFormat": "application/json",
      "contentUrl": "https://huggingface.co/datasets/FINAL-Bench/ALL-Bench-Leaderboard/resolve/main/all_bench_leaderboard_v2.1.json",
      "name": "Unified JSON Dataset (75 KB)"
    }
  ],
  "isPartOf": {
    "@type": "DataCatalog",
    "name": "Hugging Face Datasets",
    "url": "https://huggingface.co/datasets"
  },
  "funder": {
    "@type": "Organization",
    "name": "FINAL Bench"
  }
}