OpenMarkAI committed on
Commit
299162b
·
verified ·
1 Parent(s): 011c907

Upload benchmark_1612b21d.csv

Browse files
Files changed (1) hide show
  1. benchmark_1612b21d.csv +12 -0
benchmark_1612b21d.csv ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "Model","Provider","Score (%)","Score (Raw)","Max Score","Stability","Rec. Temp","Pricing Tier","Cost ($)","Time (s)","Acc/$","Acc/min","Completion (%)","Input Tokens (avg/run)","Output Tokens (avg/run)","Status"
2
+ "claude-sonnet-4.6","anthropic","50.0","2.00","4.00","±0.000","0.3","High","0.014787","8.19","135.25","14.65","100.0","4849","16","completed"
3
+ "claude-opus-4.6","anthropic","50.0","2.00","4.00","±0.000","0.3","Very High","0.024645","12.41","81.15","9.67","100.0","4849","16","completed"
4
+ "gpt-5.2","openai","75.0","3.00","4.00","±0.000","0.3","High","0.008537","7.59","351.43","23.72","100.0","4750","16","completed"
5
+ "Qwen3.5-397B-A17B","qwen","50.0","2.00","4.00","±0.000","0.3","Medium","0.007312","87.39","273.54","1.37","25.0","3372","1469","completed"
6
+ "grok-4-1-fast-reasoning","xai","57.5","2.30","4.00","±1.000","0.3","Low","0.000917","15.89","2507.63","8.68","100.0","1816","1108","completed"
7
+ "mistral-medium-latest","mistral","42.5","1.70","4.00","±1.000","0.3","Medium","0.002189","6.73","776.68","15.17","100.0","5417","11","completed"
8
+ "sonar","perplexity","57.5","2.30","4.00","±1.000","0.3","Medium","0.025590","11.83","89.88","11.66","100.0","5595","4","completed"
9
+ "llama4-maverick","meta","50.0","2.00","4.00","±0.000","0.3","Low","0.002023","7.59","988.82","15.80","100.0","7466","8","completed"
10
+ "gemini-3-pro","gemini","75.0","3.00","4.00","±0.000","0.3","High","0.061390","65.10","48.87","2.77","100.0","4535","4360","completed"
11
+ "gemini-3-flash","gemini","67.5","2.70","4.00","±1.000","0.3","Medium","0.005999","14.67","450.04","11.04","100.0","4535","1244","completed"
12
+ "gemini-3.1-pro","gemini","75.0","3.00","4.00","±0.000","0.3","High","0.028054","27.59","106.94","6.52","100.0","4535","1582","completed"