SeaWolf-AI committed on
Commit
d5c1d08
·
verified ·
1 Parent(s): fd3d74c

Upload schema.jsonld

Browse files
Files changed (1) hide show
  1. schema.jsonld +98 -23
schema.jsonld CHANGED
@@ -2,7 +2,11 @@
2
  "@context": "https://schema.org",
3
  "@type": "Dataset",
4
  "name": "ALL Bench Leaderboard 2026",
5
- "alternateName": ["ALL Bench", "ALLBench", "AI Benchmark Leaderboard 2026"],
 
 
 
 
6
  "description": "The only AI benchmark leaderboard covering LLM, VLM, Agent, Image, Video, and Music generation in a single unified view. 91 models cross-verified across 6 modalities with confidence badges. Features composite 5-axis scoring, interactive comparison tools, and downloadable intelligence reports.",
7
  "url": "https://huggingface.co/spaces/FINAL-Bench/all-bench-leaderboard",
8
  "sameAs": [
@@ -10,39 +14,110 @@
10
  "https://github.com/final-bench/ALL-Bench-Leaderboard"
11
  ],
12
  "license": "https://opensource.org/licenses/MIT",
13
- "version": "2.1",
14
  "datePublished": "2026-03-01",
15
- "dateModified": "2026-03-08",
16
  "creator": {
17
  "@type": "Organization",
18
  "name": "ALL Bench Team",
19
  "url": "https://huggingface.co/FINAL-Bench"
20
  },
21
  "keywords": [
22
- "AI benchmark", "LLM leaderboard", "GPT-5", "Claude", "Gemini",
23
- "VLM benchmark", "AI agent", "image generation", "video generation",
24
- "music generation", "MMLU-Pro", "GPQA", "ARC-AGI-2", "FINAL Bench",
25
- "metacognition", "multimodal AI", "AI evaluation", "benchmark comparison",
26
- "AI model ranking", "open source AI"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  ],
28
  "about": [
29
- {"@type": "Thing", "name": "Large Language Model"},
30
- {"@type": "Thing", "name": "Vision Language Model"},
31
- {"@type": "Thing", "name": "AI Benchmark"},
32
- {"@type": "Thing", "name": "Generative AI"},
33
- {"@type": "Thing", "name": "Metacognition"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  ],
35
  "measurementTechnique": "Cross-verified benchmark aggregation with 3-tier confidence system",
36
  "variableMeasured": [
37
- {"@type": "PropertyValue", "name": "MMLU-Pro", "description": "57K expert-level multi-discipline questions"},
38
- {"@type": "PropertyValue", "name": "GPQA Diamond", "description": "PhD-level expert questions in science"},
39
- {"@type": "PropertyValue", "name": "AIME 2025", "description": "American Invitational Mathematics Examination"},
40
- {"@type": "PropertyValue", "name": "HLE", "description": "Humanity's Last Exam — 2500 expert-sourced questions"},
41
- {"@type": "PropertyValue", "name": "ARC-AGI-2", "description": "Abstract reasoning and novel pattern recognition"},
42
- {"@type": "PropertyValue", "name": "FINAL Bench Metacognitive", "description": "AI self-correction ability measurement"},
43
- {"@type": "PropertyValue", "name": "SWE-Pro", "description": "Software engineering benchmark by Scale AI"},
44
- {"@type": "PropertyValue", "name": "IFEval", "description": "Instruction following evaluation"},
45
- {"@type": "PropertyValue", "name": "LiveCodeBench", "description": "Continuously updated coding benchmark"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  ],
47
  "distribution": [
48
  {
@@ -61,4 +136,4 @@
61
  "@type": "Organization",
62
  "name": "FINAL Bench"
63
  }
64
- }
 
2
  "@context": "https://schema.org",
3
  "@type": "Dataset",
4
  "name": "ALL Bench Leaderboard 2026",
5
+ "alternateName": [
6
+ "ALL Bench",
7
+ "ALLBench",
8
+ "AI Benchmark Leaderboard 2026"
9
+ ],
10
  "description": "The only AI benchmark leaderboard covering LLM, VLM, Agent, Image, Video, and Music generation in a single unified view. 91 models cross-verified across 6 modalities with confidence badges. Features composite 5-axis scoring, interactive comparison tools, and downloadable intelligence reports.",
11
  "url": "https://huggingface.co/spaces/FINAL-Bench/all-bench-leaderboard",
12
  "sameAs": [
 
14
  "https://github.com/final-bench/ALL-Bench-Leaderboard"
15
  ],
16
  "license": "https://opensource.org/licenses/MIT",
17
+ "version": "2.2.1",
18
  "datePublished": "2026-03-01",
19
+ "dateModified": "2026-03-10",
20
  "creator": {
21
  "@type": "Organization",
22
  "name": "ALL Bench Team",
23
  "url": "https://huggingface.co/FINAL-Bench"
24
  },
25
  "keywords": [
26
+ "AI benchmark",
27
+ "LLM leaderboard",
28
+ "GPT-5",
29
+ "Claude",
30
+ "Gemini",
31
+ "VLM benchmark",
32
+ "AI agent",
33
+ "image generation",
34
+ "video generation",
35
+ "music generation",
36
+ "MMLU-Pro",
37
+ "GPQA",
38
+ "ARC-AGI-2",
39
+ "FINAL Bench",
40
+ "metacognition",
41
+ "multimodal AI",
42
+ "AI evaluation",
43
+ "benchmark comparison",
44
+ "AI model ranking",
45
+ "open source AI"
46
  ],
47
  "about": [
48
+ {
49
+ "@type": "Thing",
50
+ "name": "Large Language Model"
51
+ },
52
+ {
53
+ "@type": "Thing",
54
+ "name": "Vision Language Model"
55
+ },
56
+ {
57
+ "@type": "Thing",
58
+ "name": "AI Benchmark"
59
+ },
60
+ {
61
+ "@type": "Thing",
62
+ "name": "Generative AI"
63
+ },
64
+ {
65
+ "@type": "Thing",
66
+ "name": "Metacognition"
67
+ }
68
  ],
69
  "measurementTechnique": "Cross-verified benchmark aggregation with 3-tier confidence system",
70
  "variableMeasured": [
71
+ {
72
+ "@type": "PropertyValue",
73
+ "name": "MMLU-Pro",
74
+ "description": "57K expert-level multi-discipline questions"
75
+ },
76
+ {
77
+ "@type": "PropertyValue",
78
+ "name": "GPQA Diamond",
79
+ "description": "PhD-level expert questions in science"
80
+ },
81
+ {
82
+ "@type": "PropertyValue",
83
+ "name": "AIME 2025",
84
+ "description": "American Invitational Mathematics Examination"
85
+ },
86
+ {
87
+ "@type": "PropertyValue",
88
+ "name": "HLE",
89
+ "description": "Humanity's Last Exam — 2500 expert-sourced questions"
90
+ },
91
+ {
92
+ "@type": "PropertyValue",
93
+ "name": "ARC-AGI-2",
94
+ "description": "Abstract reasoning and novel pattern recognition"
95
+ },
96
+ {
97
+ "@type": "PropertyValue",
98
+ "name": "FINAL Bench Metacognitive",
99
+ "description": "AI self-correction ability measurement"
100
+ },
101
+ {
102
+ "@type": "PropertyValue",
103
+ "name": "SWE-Pro",
104
+ "description": "Software engineering benchmark by Scale AI"
105
+ },
106
+ {
107
+ "@type": "PropertyValue",
108
+ "name": "IFEval",
109
+ "description": "Instruction following evaluation"
110
+ },
111
+ {
112
+ "@type": "PropertyValue",
113
+ "name": "LiveCodeBench",
114
+ "description": "Continuously updated coding benchmark"
115
+ },
116
+ {
117
+ "@type": "PropertyValue",
118
+ "name": "Union Eval S3",
119
+ "description": "ALL Bench proprietary integrated benchmark, 100% JSON auto-graded"
120
+ }
121
  ],
122
  "distribution": [
123
  {
 
136
  "@type": "Organization",
137
  "name": "FINAL Bench"
138
  }
139
+ }