Update benchmarks.csv with real BenchGecko data (413 models, 40 benchmarks)
benchmarks.csv (new file, +41 lines)
slug,name,category,max_score,unit,models_tested,top_model_1,top_score_1,top_model_2,top_score_2,top_model_3,top_score_3
gpqa-diamond,GPQA diamond,knowledge,100,%,115,Gemini 3.1 Pro Preview,92.13,GPT-5.4,91.07,Gemini 3 Pro,90.15
otis-mock-aime-2024-2025,OTIS Mock AIME 2024-2025,math,100,%,105,GPT-5.2 Chat,96.11,GPT-5.2,96.11,Gemini 3.1 Pro Preview,95.6
mmlu,MMLU,knowledge,100,%,92,GPT-4o (2024-11-20),84.13,DeepSeek V3,82.93,Gemini 1.5 Pro (Sept 2024),82.53
math-level-5,MATH level 5,math,100,%,89,GPT-5 Chat,98.13,GPT-5,98.13,GPT-5 Mini,97.85
weirdml,WeirdML,coding,100,%,87,Claude Opus 4.6,77.9,GPT-5.2 Chat,72.2,GPT-5.2,72.2
simplebench,SimpleBench,reasoning,100,%,61,Gemini 3.1 Pro Preview,75.52,Gemini 3 Pro,71.68,GPT-5.4 Pro,68.92
frontiermath-2025-02-28-private,FrontierMath-2025-02-28-Private,math,100,%,60,GPT-5.4 Pro,50,GPT-5.4,47.6,Claude Opus 4.6,40.7
aider-polyglot,Aider polyglot,coding,100,%,55,GPT-5 Chat,88,GPT-5,88,o3 Pro,84.9
fiction-livebench,Fiction.LiveBench,knowledge,100,%,53,GPT-5 Chat,97.2,GPT-5,97.2,o3 Pro,97.2
arc-agi-2,ARC-AGI-2,reasoning,100,%,52,GPT-5.4 Pro,83.33,Gemini 3.1 Pro Preview,77.1,GPT-5.4,73.95
lech-mazur-writing,Lech Mazur Writing,knowledge,100,%,49,Kimi K2 0905,87.29,GPT-5 Chat,87.23,GPT-5,87.23
arc-ai2,ARC AI2,knowledge,100,%,48,DeepSeek V3,93.73,Llama 3.1-405B,93.73,Qwen2.5 72B Instruct,92.67
gsm8k,GSM8K,math,100,%,48,GPT-4o-mini (2024-07-18),91.3,GPT-4o-mini,91.3,Qwen2.5 Coder 32B Instruct,91.1
winogrande,Winogrande,knowledge,100,%,47,Llama 3.1-405B,78.4,Claude 3 Opus,77,Falcon-180B,74.2
frontiermath-tier-4-2025-07-01-private,FrontierMath-Tier-4-2025-07-01-Private,math,100,%,39,GPT-5.4 Pro,37.5,GPT-5.4,27.1,Claude Opus 4.6,22.9
bbh,BBH,reasoning,100,%,37,DeepSeek V3,83.33,Llama 3.1-405B,77.2,phi-3-medium 14B,75.2
hellaswag,HellaSwag,knowledge,100,%,37,Llama 3.1-405B,85.6,Falcon-180B,85.33,DeepSeek V3,85.2
arc-agi,ARC-AGI,reasoning,100,%,37,Gemini 3.1 Pro Preview,98,Claude Opus 4.6,94,GPT-5.2 Chat,86.2
simpleqa-verified,SimpleQA Verified,knowledge,100,%,36,Gemini 3.1 Pro Preview,77.3,Gemini 3 Pro,72.9,Qwen3 Max,67.47
piqa,PIQA,knowledge,100,%,36,GPT-4o-mini (2024-07-18),77.4,GPT-4o-mini,77.4,Gemini 1.5 Flash (Sep 2024),75
swe-bench-verified-bash-only,SWE-Bench Verified (Bash Only),coding,100,%,32,Claude Opus 4.5,74.4,Gemini 3 Pro,74.2,GPT-5.2 Chat,71.8
triviaqa,TriviaQA,knowledge,100,%,31,Llama 2-70B,87.6,Claude 2,87.5,LLaMA-65B,86
chess-puzzles,Chess Puzzles,knowledge,100,%,29,Gemini 3.1 Pro Preview,55,GPT-5.2 Chat,49,GPT-5.2,49
geobench,GeoBench,knowledge,100,%,29,Gemini 3 Flash Preview,88,Gemini 3 Pro,84,GPT-5 Chat,81
terminal-bench,Terminal Bench,coding,100,%,27,Gemini 3.1 Pro Preview,78.4,Claude Opus 4.6,69.9,GPT-5.2 Chat,64.9
hle,HLE,knowledge,100,%,27,Gemini 3 Pro,34.37,Claude Opus 4.6,31.13,GPT-5 Pro,28.19
openbookqa,OpenBookQA,knowledge,100,%,27,phi-3-mini 3.8B,84,phi-3-small 7.4B,84,phi-3-medium 14B,83.2
vpct,VPCT,knowledge,100,%,26,Gemini 3 Pro,86.5,GPT-5.2 Chat,76,GPT-5.2,76
gso-bench,GSO-Bench,coding,100,%,23,Claude Opus 4.6,33.33,GPT-5.2 Chat,27.4,GPT-5.2,27.4
apex-agents,APEX-Agents,agentic,100,%,21,GPT-5.4,35.9,GPT-5.2 Chat,34.3,GPT-5.2,34.3
balrog,Balrog,knowledge,100,%,20,Gemini 3 Flash Preview,48.1,Grok 4,43.6,DeepSeek-R1,34.9
cybench,Cybench,coding,100,%,17,Claude Sonnet 4.5,55,Claude Opus 4.1,38,Claude Opus 4,38
lambada,LAMBADA,knowledge,100,%,16,Falcon-180B,79.8,Llama 2-70B,78.9,LLaMA-65B,77.7
cadeval,CadEval,coding,100,%,15,o3,74,o4 Mini,62,o1,56
deepresearch-bench,DeepResearch Bench,knowledge,100,%,12,Claude Sonnet 4.5,52.6,GPT-5 Chat,51,GPT-5,51
videomme,VideoMME,multimodal,100,%,11,Gemini 1.5 Pro (Feb 2024),66.67,Qwen2.5 72B Instruct,64.67,GPT-4o (2024-11-20),62.53
the-agent-company,The Agent Company,agentic,100,%,10,DeepSeek V3.2 Exp,42.9,Claude Sonnet 4,33.1,Claude 3.7 Sonnet,30.9
osworld,OSWorld,agentic,100,%,8,Claude Opus 4.5,66.3,Kimi K2.5,63.3,Claude Sonnet 4.5,62.9
anli,ANLI,knowledge,100,%,8,phi-3-small 7.4B,37.15,Llama 3 8B Instruct,35.95,phi-3-medium 14B,33.7
scienceqa,ScienceQA,knowledge,100,%,5,Claude 3 Haiku,62.67,Llama 2-13B,41.04,LLaMA-13B,24.44