| Column | Type | Values / lengths |
|---|---|---|
| _leaderboard | stringclasses | 1 value |
| _developer | stringclasses | 559 values |
| _model | stringlengths | 9–102 |
| _uuid | stringlengths | 36–36 |
| schema_version | stringclasses | 1 value |
| evaluation_id | stringlengths | 35–133 |
| retrieved_timestamp | stringlengths | 13–18 |
| source_data | stringclasses | 1 value |
| evaluation_source_name | stringclasses | 1 value |
| evaluation_source_type | stringclasses | 1 value |
| source_organization_name | stringclasses | 1 value |
| source_organization_url | null | |
| source_organization_logo_url | null | |
| evaluator_relationship | stringclasses | 1 value |
| model_name | stringlengths | 4–102 |
| model_id | stringlengths | 9–102 |
| model_developer | stringclasses | 559 values |
| model_inference_platform | stringclasses | 1 value |
| evaluation_results | stringlengths | 1.35k–1.41k |
| additional_details | stringclasses | 660 values |
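A minimal sketch of how a single record with this schema might be decoded, assuming the structure inferred from the preview below: `source_data` is a JSON-encoded list of URLs, `evaluation_results` is a JSON-encoded list of per-benchmark entries (each with `evaluation_name`, `metric_config`, and `score_details.score`), and `additional_details` is a JSON object with `precision`, `architecture`, and `params_billions`. The helper name `parse_record` is illustrative, not part of any published API; the example values are copied from the first record shown below (with `evaluation_results` shortened to its IFEval entry).

```python
import json
from typing import Any


def parse_record(record: dict[str, Any]) -> dict[str, Any]:
    """Decode the JSON-encoded string columns of one leaderboard record.

    Assumes the field layout shown in the schema table above; this is a
    sketch, not an official loader for the dataset.
    """
    return {
        "model_id": record["model_id"],
        "model_developer": record["model_developer"],
        # source_data: JSON-encoded list of source URLs.
        "source_data": json.loads(record["source_data"]),
        # evaluation_results: JSON-encoded list of benchmark entries.
        "scores": {
            entry["evaluation_name"]: entry["score_details"]["score"]
            for entry in json.loads(record["evaluation_results"])
        },
        # additional_details: JSON object with model metadata.
        "details": json.loads(record["additional_details"]),
    }


if __name__ == "__main__":
    # Example values taken from the first record in the preview (truncated).
    example = {
        "model_id": "Nitral-AI/Captain-Eris-BMO_Violent-GRPO-v0.420",
        "model_developer": "Nitral-AI",
        "source_data": '["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]',
        "evaluation_results": (
            '[{"evaluation_name": "IFEval", "metric_config": '
            '{"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, '
            '"score_type": "continuous", "min_score": 0, "max_score": 1}, '
            '"score_details": {"score": 0.6312805578088361}}]'
        ),
        "additional_details": '{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 12.248}',
    }
    parsed = parse_record(example)
    print(parsed["scores"]["IFEval"])  # 0.6312805578088361
```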
HF Open LLM v2
Nitral-AI
Nitral-AI/Captain-Eris-BMO_Violent-GRPO-v0.420
e841483e-042b-4a2a-8dbc-9ed7529f7618
0.0.1
hfopenllm_v2/Nitral-AI_Captain-Eris-BMO_Violent-GRPO-v0.420/1762652579.784868
1762652579.7848692
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nitral-AI/Captain-Eris-BMO_Violent-GRPO-v0.420
Nitral-AI/Captain-Eris-BMO_Violent-GRPO-v0.420
Nitral-AI
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6312805578088361}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
HF Open LLM v2
Nitral-AI
Nitral-AI/Hathor_Stable-v0.2-L3-8B
2bb06e2f-9aee-4ac4-b9a6-fe537c2c9890
0.0.1
hfopenllm_v2/Nitral-AI_Hathor_Stable-v0.2-L3-8B/1762652579.7859662
1762652579.785967
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nitral-AI/Hathor_Stable-v0.2-L3-8B
Nitral-AI/Hathor_Stable-v0.2-L3-8B
Nitral-AI
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7174840534226963}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
Nitral-AI
Nitral-AI/Nera_Noctis-12B
2f5caa38-56e9-4740-baca-22fb02e57150
0.0.1
hfopenllm_v2/Nitral-AI_Nera_Noctis-12B/1762652579.786392
1762652579.7863932
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nitral-AI/Nera_Noctis-12B
Nitral-AI/Nera_Noctis-12B
Nitral-AI
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45617517076911485}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
HF Open LLM v2
Nitral-AI
Nitral-AI/Captain_BMO-12B
6fed7e5b-9692-40f7-913e-fc3b57b8c72a
0.0.1
hfopenllm_v2/Nitral-AI_Captain_BMO-12B/1762652579.7857668
1762652579.7857668
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nitral-AI/Captain_BMO-12B
Nitral-AI/Captain_BMO-12B
Nitral-AI
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4750595087700634}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
HF Open LLM v2
Nitral-AI
Nitral-AI/Captain-Eris_Violet-GRPO-v0.420
cf030461-1234-48ce-a025-ba0f52cdf191
0.0.1
hfopenllm_v2/Nitral-AI_Captain-Eris_Violet-GRPO-v0.420/1762652579.785343
1762652579.785344
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nitral-AI/Captain-Eris_Violet-GRPO-v0.420
Nitral-AI/Captain-Eris_Violet-GRPO-v0.420
Nitral-AI
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6261597007052399}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
HF Open LLM v2
Nitral-AI
Nitral-AI/Hathor_Tahsin-L3-8B-v0.85
a73461e6-a1f4-43c9-9a0f-f03c9be46276
0.0.1
hfopenllm_v2/Nitral-AI_Hathor_Tahsin-L3-8B-v0.85/1762652579.786179
1762652579.78618
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nitral-AI/Hathor_Tahsin-L3-8B-v0.85
Nitral-AI/Hathor_Tahsin-L3-8B-v0.85
Nitral-AI
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7110145524984818}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
ZeroXClem/Llama-3.1-8B-RainbowLight-EtherealMix
18072fb3-a27a-4ad7-93ef-a3770637a0dc
0.0.1
hfopenllm_v2/ZeroXClem_Llama-3.1-8B-RainbowLight-EtherealMix/1762652579.96684
1762652579.966841
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ZeroXClem/Llama-3.1-8B-RainbowLight-EtherealMix
ZeroXClem/Llama-3.1-8B-RainbowLight-EtherealMix
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49734149833552754}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
ZeroXClem/Llama-3.1-8B-SuperNova-EtherealHermes
1007d3aa-f8ca-420c-b974-a0f552c649ac
0.0.1
hfopenllm_v2/ZeroXClem_Llama-3.1-8B-SuperNova-EtherealHermes/1762652579.967272
1762652579.967272
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ZeroXClem/Llama-3.1-8B-SuperNova-EtherealHermes
ZeroXClem/Llama-3.1-8B-SuperNova-EtherealHermes
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7338705745200512}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
ZeroXClem/Llama-3.1-8B-SpecialTitanFusion
38be33eb-3dfb-4987-a2f0-14ceb9d834f7
0.0.1
hfopenllm_v2/ZeroXClem_Llama-3.1-8B-SpecialTitanFusion/1762652579.967058
1762652579.967059
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ZeroXClem/Llama-3.1-8B-SpecialTitanFusion
ZeroXClem/Llama-3.1-8B-SpecialTitanFusion
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7402403400754443}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
ZeroXClem/Llama-3.1-8B-SuperTulu-LexiNova
ba3564f4-f48f-4548-ae15-b5f78c4b44f4
0.0.1
hfopenllm_v2/ZeroXClem_Llama-3.1-8B-SuperTulu-LexiNova/1762652579.96749
1762652579.9674911
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ZeroXClem/Llama-3.1-8B-SuperTulu-LexiNova
ZeroXClem/Llama-3.1-8B-SuperTulu-LexiNova
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4164583305629064}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
ZeroXClem/Llama-3.1-8B-AthenaSky-MegaMix
2c35754b-3763-4098-8686-39694028e0d9
0.0.1
hfopenllm_v2/ZeroXClem_Llama-3.1-8B-AthenaSky-MegaMix/1762652579.966579
1762652579.96658
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ZeroXClem/Llama-3.1-8B-AthenaSky-MegaMix
ZeroXClem/Llama-3.1-8B-AthenaSky-MegaMix
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.63008151704145}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH",...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
alcholjung/llama3_medical_tuned
30324407-0848-48ae-bbd7-80676d9467db
0.0.1
hfopenllm_v2/alcholjung_llama3_medical_tuned/1762652579.9813929
1762652579.9813938
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
alcholjung/llama3_medical_tuned
alcholjung/llama3_medical_tuned
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.010566408241244343}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on B...
{"precision": "float16", "architecture": "?", "params_billions": 16.061}
HF Open LLM v2
meta
Groq/Llama-3-Groq-8B-Tool-Use
636b3b4a-dc1f-4008-83ba-0d83fdcd5acb
0.0.1
hfopenllm_v2/Groq_Llama-3-Groq-8B-Tool-Use/1762652579.633301
1762652579.633302
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Groq/Llama-3-Groq-8B-Tool-Use
Groq/Llama-3-Groq-8B-Tool-Use
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6098230472922956}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
AGI-0/smartllama3.1-8B-001
c97c2d67-79d5-4813-8569-64eaefe66f89
0.0.1
hfopenllm_v2/AGI-0_smartllama3.1-8B-001/1762652579.4741051
1762652579.474106
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
AGI-0/smartllama3.1-8B-001
AGI-0/smartllama3.1-8B-001
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35178659290682057}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
AGI-0/Artificium-llama3.1-8B-001
2e3e8be1-725f-4662-a8b1-da4437018e31
0.0.1
hfopenllm_v2/AGI-0_Artificium-llama3.1-8B-001/1762652579.4738402
1762652579.473841
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
AGI-0/Artificium-llama3.1-8B-001
AGI-0/Artificium-llama3.1-8B-001
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5247687247614108}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Naveenpoliasetty/llama3-8B-V2
53ae919d-c56b-415f-87c0-c6273730357b
0.0.1
hfopenllm_v2/Naveenpoliasetty_llama3-8B-V2/1762652579.769772
1762652579.769773
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Naveenpoliasetty/llama3-8B-V2
Naveenpoliasetty/llama3-8B-V2
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4122616878770551}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
sumink/llamaft
a13b4873-22c0-461a-b4ba-41246ede0dfa
0.0.1
hfopenllm_v2/sumink_llamaft/1762652580.547796
1762652580.547797
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
sumink/llamaft
sumink/llamaft
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16086871722584964}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.213}
HF Open LLM v2
meta
sumink/llamamerge
f7406d3e-dbfa-4f12-946e-f4e58c728fa8
0.0.1
hfopenllm_v2/sumink_llamamerge/1762652580.547998
1762652580.547999
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
sumink/llamamerge
sumink/llamamerge
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26718107953563214}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 13.016}
HF Open LLM v2
meta
sumink/flflmillama
19f198e5-37b8-4d62-8cbe-849f6875d39e
0.0.1
hfopenllm_v2/sumink_flflmillama/1762652580.5473018
1762652580.5473018
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
sumink/flflmillama
sumink/flflmillama
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16756317681529453}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.213}
HF Open LLM v2
meta
nbeerbower/llama3.1-cc-8B
e011ff58-ea5c-4857-a76d-503c4188886f
0.0.1
hfopenllm_v2/nbeerbower_llama3.1-cc-8B/1762652580.385431
1762652580.385432
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
nbeerbower/llama3.1-cc-8B
nbeerbower/llama3.1-cc-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5068086011782071}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
nbeerbower/Llama-3.1-Nemotron-lorablated-70B
a9af8b88-8f00-4662-8ca4-d042030885ae
0.0.1
hfopenllm_v2/nbeerbower_Llama-3.1-Nemotron-lorablated-70B/1762652580.379643
1762652580.379644
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
nbeerbower/Llama-3.1-Nemotron-lorablated-70B
nbeerbower/Llama-3.1-Nemotron-lorablated-70B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7228797368759337}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
nbeerbower/llama-3-gutenberg-8B
144ff584-3230-42e5-acae-35518b10a1e9
0.0.1
hfopenllm_v2/nbeerbower_llama-3-gutenberg-8B/1762652580.3850691
1762652580.385074
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
nbeerbower/llama-3-gutenberg-8B
nbeerbower/llama-3-gutenberg-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4371910973993448}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
nbeerbower/Llama3.1-Gutenberg-Doppel-70B
fffd0da2-d4b0-4a11-9fd4-c0dfa0c70431
0.0.1
hfopenllm_v2/nbeerbower_Llama3.1-Gutenberg-Doppel-70B/1762652580.379898
1762652580.3798988
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
nbeerbower/Llama3.1-Gutenberg-Doppel-70B
nbeerbower/Llama3.1-Gutenberg-Doppel-70B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7092159913474027}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
nbeerbower/llama3.1-kartoffeldes-70B
c17cced5-be98-49c5-a919-c15b641ba2e7
0.0.1
hfopenllm_v2/nbeerbower_llama3.1-kartoffeldes-70B/1762652580.385698
1762652580.385699
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
nbeerbower/llama3.1-kartoffeldes-70B
nbeerbower/llama3.1-kartoffeldes-70B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8230218043679659}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2
3b02898e-b47f-4d53-9bd4-575d47df29af
0.0.1
hfopenllm_v2/Orenguteng_Llama-3.1-8B-Lexi-Uncensored-V2/1762652579.808416
1762652579.808417
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2
Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7791581891603169}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Orenguteng/Llama-3.1-8B-Lexi-Uncensored
fe095b66-350c-4236-ab1b-e2e19af73486
0.0.1
hfopenllm_v2/Orenguteng_Llama-3.1-8B-Lexi-Uncensored/1762652579.8081658
1762652579.808167
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Orenguteng/Llama-3.1-8B-Lexi-Uncensored
Orenguteng/Llama-3.1-8B-Lexi-Uncensored
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7776843220432896}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
fluently-lm/Llama-TI-8B
63a32ad0-b871-437c-991a-342de8c13345
0.0.1
hfopenllm_v2/fluently-lm_Llama-TI-8B/1762652580.156513
1762652580.156514
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
fluently-lm/Llama-TI-8B
fluently-lm/Llama-TI-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28803906966847964}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
MoonRide/Llama-3.2-3B-Khelavaster
ed373700-5ff1-4a84-8746-12ec4c278e00
0.0.1
hfopenllm_v2/MoonRide_Llama-3.2-3B-Khelavaster/1762652579.762122
1762652579.762123
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
MoonRide/Llama-3.2-3B-Khelavaster
MoonRide/Llama-3.2-3B-Khelavaster
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4924954675815725}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.607}
HF Open LLM v2
meta
OpenBuddy/openbuddy-qwen2.5llamaify-7b-v23.1-200k
d5f3ca22-b682-47c6-a7ba-93b401cb8c8f
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-qwen2.5llamaify-7b-v23.1-200k/1762652579.804652
1762652579.8046532
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-qwen2.5llamaify-7b-v23.1-200k
OpenBuddy/openbuddy-qwen2.5llamaify-7b-v23.1-200k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5672582082208539}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 7.615}
HF Open LLM v2
meta
OpenBuddy/openbuddy-llama3-70b-v21.2-32k
3d49db5c-bcd1-4d2f-9616-c551a53bdebe
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-llama3-70b-v21.2-32k/1762652579.8002949
1762652579.8002958
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-llama3-70b-v21.2-32k
OpenBuddy/openbuddy-llama3-70b-v21.2-32k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7010476646409305}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
OpenBuddy/openbuddy-llama3.2-1b-v23.1-131k
85379044-198d-4fb5-82c8-50857f8d65d0
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-llama3.2-1b-v23.1-131k/1762652579.802413
1762652579.8024142
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-llama3.2-1b-v23.1-131k
OpenBuddy/openbuddy-llama3.2-1b-v23.1-131k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3590052172679601}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.498}
HF Open LLM v2
meta
OpenBuddy/openbuddy-qwen2.5llamaify-14b-v23.1-200k
489b8b24-4295-41b3-b286-14f79972fe93
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-qwen2.5llamaify-14b-v23.1-200k/1762652579.804163
1762652579.8041642
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-qwen2.5llamaify-14b-v23.1-200k
OpenBuddy/openbuddy-qwen2.5llamaify-14b-v23.1-200k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.630880508162786}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH"...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.77}
HF Open LLM v2
meta
OpenBuddy/openbuddy-llama3-8b-v21.1-8k
2a86c8f6-2aed-4e0c-ad8a-e9ff5065a1e4
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-llama3-8b-v21.1-8k/1762652579.800596
1762652579.800596
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-llama3-8b-v21.1-8k
OpenBuddy/openbuddy-llama3-8b-v21.1-8k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5569666263292509}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
OpenBuddy/openbuddy-llama3.1-70b-v22.1-131k
77d10b46-e3cf-42a0-b215-f9f8ff5ef60d
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-llama3.1-70b-v22.1-131k/1762652579.801551
1762652579.801553
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-llama3.1-70b-v22.1-131k
OpenBuddy/openbuddy-llama3.1-70b-v22.1-131k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7332710541363582}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
OpenBuddy/openbuddy-llama3.2-3b-v23.2-131k
6d6e86f6-f1b7-42ef-9581-b0542e6e12ef
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-llama3.2-3b-v23.2-131k/1762652579.802651
1762652579.802652
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-llama3.2-3b-v23.2-131k
OpenBuddy/openbuddy-llama3.2-3b-v23.2-131k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4319450169993395}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.607}
HF Open LLM v2
meta
OpenBuddy/openbuddy-llama3.1-8b-v22.3-131k
7abaa7f8-8378-496c-b5f8-ac9046eeccc8
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-llama3.1-8b-v22.3-131k/1762652579.8021362
1762652579.802138
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-llama3.1-8b-v22.3-131k
OpenBuddy/openbuddy-llama3.1-8b-v22.3-131k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5997065563815123}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
OpenBuddy/openbuddy-qwen2.5llamaify-14b-v23.3-200k
ce4e7736-51d8-431a-9bef-ac2bcb3ff0fe
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-qwen2.5llamaify-14b-v23.3-200k/1762652579.8044102
1762652579.804411
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-qwen2.5llamaify-14b-v23.3-200k
OpenBuddy/openbuddy-qwen2.5llamaify-14b-v23.3-200k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6131453432448126}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.77}
HF Open LLM v2
meta
OpenBuddy/openbuddy-llama3.1-8b-v22.2-131k
b57cd648-1503-4bbf-81d7-4ca72ac9ff27
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-llama3.1-8b-v22.2-131k/1762652579.801888
1762652579.801889
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-llama3.1-8b-v22.2-131k
OpenBuddy/openbuddy-llama3.1-8b-v22.2-131k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6657269378582162}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
OpenBuddy/openbuddy-llama3.3-70b-v24.1-131k
49768a60-0b77-4945-a048-013a6fb719ca
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-llama3.3-70b-v24.1-131k/1762652579.802965
1762652579.8029802
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-llama3.3-70b-v24.1-131k
OpenBuddy/openbuddy-llama3.3-70b-v24.1-131k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.812080834408259}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH"...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
OpenBuddy/openbuddy-llama3-8b-v21.2-32k
960fabe4-5395-4d3f-9680-65fe0b8655ac
0.0.1
hfopenllm_v2/OpenBuddy_openbuddy-llama3-8b-v21.2-32k/1762652579.800807
1762652579.800808
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
OpenBuddy/openbuddy-llama3-8b-v21.2-32k
OpenBuddy/openbuddy-llama3-8b-v21.2-32k
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6191904147661538}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
suayptalha/DeepSeek-R1-Distill-Llama-3B
4146ffb5-ac76-43b7-acdc-8c181f2c60d2
0.0.1
hfopenllm_v2/suayptalha_DeepSeek-R1-Distill-Llama-3B/1762652580.543217
1762652580.543217
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
suayptalha/DeepSeek-R1-Distill-Llama-3B
suayptalha/DeepSeek-R1-Distill-Llama-3B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7092658590318134}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.213}
HF Open LLM v2
meta
suayptalha/Komodo-Llama-3.2-3B-v2-fp16
d86e291c-cc26-475c-9ccd-e3ee68e8bee2
0.0.1
hfopenllm_v2/suayptalha_Komodo-Llama-3.2-3B-v2-fp16/1762652580.543882
1762652580.543883
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
suayptalha/Komodo-Llama-3.2-3B-v2-fp16
suayptalha/Komodo-Llama-3.2-3B-v2-fp16
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6340532010620709}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.0}
HF Open LLM v2
meta
necva/IE-cont-Llama3.1-8B
43f5a551-7257-4595-9b0c-60799ade231b
0.0.1
hfopenllm_v2/necva_IE-cont-Llama3.1-8B/1762652580.3888798
1762652580.388881
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
necva/IE-cont-Llama3.1-8B
necva/IE-cont-Llama3.1-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.20490742341431845}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
DavidAU/DeepSeek-MOE-4X8B-R1-Distill-Llama-3.1-Deep-Thinker-Uncensored-24B
d827463a-19cd-4bf2-8823-399b22b57387
0.0.1
hfopenllm_v2/DavidAU_DeepSeek-MOE-4X8B-R1-Distill-Llama-3.1-Deep-Thinker-Uncensored-24B/1762652579.5383239
1762652579.538326
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
DavidAU/DeepSeek-MOE-4X8B-R1-Distill-Llama-3.1-Deep-Thinker-Uncensored-24B
DavidAU/DeepSeek-MOE-4X8B-R1-Distill-Llama-3.1-Deep-Thinker-Uncensored-24B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3882564927725103}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 24.942}
HF Open LLM v2
meta
DavidAU/DeepThought-MOE-8X3B-R1-Llama-3.2-Reasoning-18B
f2b1fc61-a1c4-431c-b507-7d222ac3aedc
0.0.1
hfopenllm_v2/DavidAU_DeepThought-MOE-8X3B-R1-Llama-3.2-Reasoning-18B/1762652579.5393531
1762652579.539354
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
DavidAU/DeepThought-MOE-8X3B-R1-Llama-3.2-Reasoning-18B
DavidAU/DeepThought-MOE-8X3B-R1-Llama-3.2-Reasoning-18B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3793135547015253}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 18.405}
HF Open LLM v2
meta
DavidAU/DeepSeek-MOE-4X8B-R1-Distill-Llama-3.1-Mad-Scientist-24B
efad116f-dfc7-4a63-95b1-c61655cd7f0c
0.0.1
hfopenllm_v2/DavidAU_DeepSeek-MOE-4X8B-R1-Distill-Llama-3.1-Mad-Scientist-24B/1762652579.538624
1762652579.538625
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
DavidAU/DeepSeek-MOE-4X8B-R1-Distill-Llama-3.1-Mad-Scientist-24B
DavidAU/DeepSeek-MOE-4X8B-R1-Distill-Llama-3.1-Mad-Scientist-24B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3436182662003484}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 24.942}
HF Open LLM v2
meta
DavidAU/DeepSeek-BlackRoot-R1-Distill-Llama-3.1-8B
5e116cf4-1be5-44aa-b266-494b1e4127d3
0.0.1
hfopenllm_v2/DavidAU_DeepSeek-BlackRoot-R1-Distill-Llama-3.1-8B/1762652579.5376909
1762652579.537696
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
DavidAU/DeepSeek-BlackRoot-R1-Distill-Llama-3.1-8B
DavidAU/DeepSeek-BlackRoot-R1-Distill-Llama-3.1-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36849780803822746}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
DavidAU/DeepSeek-Grand-Horror-SMB-R1-Distill-Llama-3.1-16B
a3b69c21-b6bf-4bf9-9097-ebb26c586829
0.0.1
hfopenllm_v2/DavidAU_DeepSeek-Grand-Horror-SMB-R1-Distill-Llama-3.1-16B/1762652579.538059
1762652579.53806
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
DavidAU/DeepSeek-Grand-Horror-SMB-R1-Distill-Llama-3.1-16B
DavidAU/DeepSeek-Grand-Horror-SMB-R1-Distill-Llama-3.1-16B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2506948230694557}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 15.664}
HF Open LLM v2
meta
DavidAU/DeepHermes-3-Llama-3-8B-Preview-16.5B-Brainstorm
e2d5ee61-4d0a-4925-b3bf-016b8ff6b1b9
0.0.1
hfopenllm_v2/DavidAU_DeepHermes-3-Llama-3-8B-Preview-16.5B-Brainstorm/1762652579.537201
1762652579.537202
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
DavidAU/DeepHermes-3-Llama-3-8B-Preview-16.5B-Brainstorm
DavidAU/DeepHermes-3-Llama-3-8B-Preview-16.5B-Brainstorm
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31356799957446246}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 16.537}
HF Open LLM v2
meta
DavidAU/DeepSeek-V2-Grand-Horror-SMB-R1-Distill-Llama-3.1-Uncensored-16.5B
5af2dce8-b12c-474c-b9e2-b5a38687772d
0.0.1
hfopenllm_v2/DavidAU_DeepSeek-V2-Grand-Horror-SMB-R1-Distill-Llama-3.1-Uncensored-16.5B/1762652579.539129
1762652579.539129
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
DavidAU/DeepSeek-V2-Grand-Horror-SMB-R1-Distill-Llama-3.1-Uncensored-16.5B
DavidAU/DeepSeek-V2-Grand-Horror-SMB-R1-Distill-Llama-3.1-Uncensored-16.5B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2853162940996556}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 16.537}
HF Open LLM v2
meta
MaziyarPanahi/calme-2.1-llama3.1-70b
e216df49-368d-457f-9153-e33741b7b847
0.0.1
hfopenllm_v2/MaziyarPanahi_calme-2.1-llama3.1-70b/1762652579.751613
1762652579.7516139
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
MaziyarPanahi/calme-2.1-llama3.1-70b
MaziyarPanahi/calme-2.1-llama3.1-70b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8434298771703524}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
MaziyarPanahi/calme-2.2-llama3-70b
8b86e8c3-eb04-41a8-91e3-3eef396aca4f
0.0.1
hfopenllm_v2/MaziyarPanahi_calme-2.2-llama3-70b/1762652579.753183
1762652579.753183
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
MaziyarPanahi/calme-2.2-llama3-70b
MaziyarPanahi/calme-2.2-llama3-70b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8208486814984242}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
MaziyarPanahi/calme-2.2-llama3.1-70b
9112c2ec-cf0e-4d2c-9261-14ebb8706d69
0.0.1
hfopenllm_v2/MaziyarPanahi_calme-2.2-llama3.1-70b/1762652579.753403
1762652579.753404
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
MaziyarPanahi/calme-2.2-llama3.1-70b
MaziyarPanahi/calme-2.2-llama3.1-70b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8592667455684251}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
MaziyarPanahi/calme-2.3-llama3-70b
66d7e97b-0a79-4d39-8d6b-cf083239aa93
0.0.1
hfopenllm_v2/MaziyarPanahi_calme-2.3-llama3-70b/1762652579.7547278
1762652579.7547278
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
MaziyarPanahi/calme-2.3-llama3-70b
MaziyarPanahi/calme-2.3-llama3-70b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8010401290797307}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
MaziyarPanahi/calme-2.4-llama3-70b
8cf1e62b-f646-4082-9d10-8cf376154d40
0.0.1
hfopenllm_v2/MaziyarPanahi_calme-2.4-llama3-70b/1762652579.7565
1762652579.756501
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
MaziyarPanahi/calme-2.4-llama3-70b
MaziyarPanahi/calme-2.4-llama3-70b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5027371817887649}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
MaziyarPanahi/calme-2.3-llama3.1-70b
7e8b2abe-68e5-445b-ae22-5b827e53b72d
0.0.1
hfopenllm_v2/MaziyarPanahi_calme-2.3-llama3.1-70b/1762652579.755093
1762652579.7550972
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
MaziyarPanahi/calme-2.3-llama3.1-70b
MaziyarPanahi/calme-2.3-llama3.1-70b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8604657863358112}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
MaziyarPanahi/calme-3.1-llamaloi-3b
0acfe83d-3876-4c08-9b26-931450d24bfd
0.0.1
hfopenllm_v2/MaziyarPanahi_calme-3.1-llamaloi-3b/1762652579.758682
1762652579.758683
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
MaziyarPanahi/calme-3.1-llamaloi-3b
MaziyarPanahi/calme-3.1-llamaloi-3b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7375175645066203}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.213}
HF Open LLM v2
meta
PJMixers-Dev/LLaMa-3.1-RomboTiesTest2-8B
dbfe2c89-a7c8-4fe5-95a1-cf1a58b6f55c
0.0.1
hfopenllm_v2/PJMixers-Dev_LLaMa-3.1-RomboTiesTest2-8B/1762652579.810312
1762652579.810313
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
PJMixers-Dev/LLaMa-3.1-RomboTiesTest2-8B
PJMixers-Dev/LLaMa-3.1-RomboTiesTest2-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7825303527972447}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 4.015}
HF Open LLM v2
meta
PJMixers-Dev/LLaMa-3.1-RomboTiesTest-8B
0130c0ac-a790-492d-aac2-55e999b724ef
0.0.1
hfopenllm_v2/PJMixers-Dev_LLaMa-3.1-RomboTiesTest-8B/1762652579.8100638
1762652579.8100648
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
PJMixers-Dev/LLaMa-3.1-RomboTiesTest-8B
PJMixers-Dev/LLaMa-3.1-RomboTiesTest-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7825303527972447}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 4.015}
HF Open LLM v2
meta
pszemraj/Llama-3-6.3b-v0.1
74260e1f-8b2d-40ac-ac96-f268d65fa838
0.0.1
hfopenllm_v2/pszemraj_Llama-3-6.3b-v0.1/1762652580.4812942
1762652580.481295
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
pszemraj/Llama-3-6.3b-v0.1
pszemraj/Llama-3-6.3b-v0.1
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10438968603305895}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 6.3}
HF Open LLM v2
meta
yuvraj17/Llama3-8B-SuperNova-Spectrum-dare_ties
2bde390d-b448-4ac2-addd-215d722aa66b
0.0.1
hfopenllm_v2/yuvraj17_Llama3-8B-SuperNova-Spectrum-dare_ties/1762652580.6118348
1762652580.6118348
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
yuvraj17/Llama3-8B-SuperNova-Spectrum-dare_ties
yuvraj17/Llama3-8B-SuperNova-Spectrum-dare_ties
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4012708502329375}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
yuvraj17/Llama3-8B-abliterated-Spectrum-slerp
45cd6db1-064f-45d9-89f2-d931b4f82326
0.0.1
hfopenllm_v2/yuvraj17_Llama3-8B-abliterated-Spectrum-slerp/1762652580.6120949
1762652580.612096
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
yuvraj17/Llama3-8B-abliterated-Spectrum-slerp
yuvraj17/Llama3-8B-abliterated-Spectrum-slerp
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2884878788281759}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
amd/AMD-Llama-135m
086ca0cf-79a3-4b94-980d-9384f1848562
0.0.1
hfopenllm_v2/amd_AMD-Llama-135m/1762652580.010782
1762652580.010783
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
amd/AMD-Llama-135m
amd/AMD-Llama-135m
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19184319826948054}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.134}
HF Open LLM v2
meta
amd/AMD-Llama-135m
4a623195-2073-4637-b748-696012109846
0.0.1
hfopenllm_v2/amd_AMD-Llama-135m/1762652580.010537
1762652580.010538
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
amd/AMD-Llama-135m
amd/AMD-Llama-135m
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18422452426229072}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 0.135}
HF Open LLM v2
meta
allenai/Llama-3.1-Tulu-3-70B
5683ed15-2699-4f0c-8e74-a65ff2d4dd49
0.0.1
hfopenllm_v2/allenai_Llama-3.1-Tulu-3-70B/1762652579.981919
1762652579.981919
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allenai/Llama-3.1-Tulu-3-70B
allenai/Llama-3.1-Tulu-3-70B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8379344583482937}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
allenai/Llama-3.1-Tulu-3-70B
006cafcb-452f-4df0-b42c-058719eb63e4
0.0.1
hfopenllm_v2/allenai_Llama-3.1-Tulu-3-70B/1762652579.981659
1762652579.981659
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allenai/Llama-3.1-Tulu-3-70B
allenai/Llama-3.1-Tulu-3-70B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8291167435737177}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
allenai/Llama-3.1-Tulu-3-8B-RM
1a363aad-a1e7-404e-8c4a-4132f4fbab2b
0.0.1
hfopenllm_v2/allenai_Llama-3.1-Tulu-3-8B-RM/1762652579.9831831
1762652579.9831831
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allenai/Llama-3.1-Tulu-3-8B-RM
allenai/Llama-3.1-Tulu-3-8B-RM
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16701352411601217}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForSequenceClassification", "params_billions": 8.0}
HF Open LLM v2
meta
allenai/Llama-3.1-Tulu-3-8B
8a7c4b5a-85c7-4fc6-af4c-e9cde5d32d8b
0.0.1
hfopenllm_v2/allenai_Llama-3.1-Tulu-3-8B/1762652579.982752
1762652579.982752
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allenai/Llama-3.1-Tulu-3-8B
allenai/Llama-3.1-Tulu-3-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8254697535871487}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
allenai/Llama-3.1-Tulu-3-8B
5ad18861-1b4d-456d-9e1c-e945c1f71530
0.0.1
hfopenllm_v2/allenai_Llama-3.1-Tulu-3-8B/1762652579.9825459
1762652579.982547
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allenai/Llama-3.1-Tulu-3-8B
allenai/Llama-3.1-Tulu-3-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8266687943545348}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
mkurman/llama-3.2-MEDIT-3B-o1
43a51d6d-e038-4476-a63b-2f4260d736d4
0.0.1
hfopenllm_v2/mkurman_llama-3.2-MEDIT-3B-o1/1762652580.365804
1762652580.3658051
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
mkurman/llama-3.2-MEDIT-3B-o1
mkurman/llama-3.2-MEDIT-3B-o1
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43816517950150047}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.607}
HF Open LLM v2
meta
Corianas/llama-3-reactor
0670ba93-c3d6-4a74-94e4-4a77311d4984
0.0.1
hfopenllm_v2/Corianas_llama-3-reactor/1762652579.5122728
1762652579.512274
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Corianas/llama-3-reactor
Corianas/llama-3-reactor
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23001192391742797}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": -1.0}
HF Open LLM v2
meta
maldv/badger-lambda-llama-3-8b
18ae9d71-15e0-4d11-86c0-9cac4dbaa3f3
0.0.1
hfopenllm_v2/maldv_badger-lambda-llama-3-8b/1762652580.331519
1762652580.33152
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
maldv/badger-lambda-llama-3-8b
maldv/badger-lambda-llama-3-8b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4860758343417687}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
maldv/badger-writer-llama-3-8b
7c88458f-e9a0-4e90-b5ed-dbdb6fd49b9d
0.0.1
hfopenllm_v2/maldv_badger-writer-llama-3-8b/1762652580.332005
1762652580.332005
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
maldv/badger-writer-llama-3-8b
maldv/badger-writer-llama-3-8b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5303140112678804}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
maldv/badger-mu-llama-3-8b
d43699f9-e6e5-428b-ab52-9d7114443608
0.0.1
hfopenllm_v2/maldv_badger-mu-llama-3-8b/1762652580.3317509
1762652580.3317518
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
maldv/badger-mu-llama-3-8b
maldv/badger-mu-llama-3-8b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49194581488229006}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
maldv/badger-kappa-llama-3-8b
32e1b138-c236-48e3-8152-d3715127d309
0.0.1
hfopenllm_v2/maldv_badger-kappa-llama-3-8b/1762652580.331178
1762652580.331179
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
maldv/badger-kappa-llama-3-8b
maldv/badger-kappa-llama-3-8b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46946435457918323}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
mkxu/llama-3-8b-po1
e26ea6fd-723d-45de-b0f1-5bcbae1eb992
0.0.1
hfopenllm_v2/mkxu_llama-3-8b-po1/1762652580.3669372
1762652580.366938
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
mkxu/llama-3-8b-po1
mkxu/llama-3-8b-po1
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4081149128756145}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
glaiveai/Reflection-Llama-3.1-70B
3e8ba765-d24b-4ffe-a816-21ea02b7ba14
0.0.1
hfopenllm_v2/glaiveai_Reflection-Llama-3.1-70B/1762652580.164674
1762652580.164675
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
glaiveai/Reflection-Llama-3.1-70B
glaiveai/Reflection-Llama-3.1-70B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5990571683134085}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 69.5}
HF Open LLM v2
meta
akjindal53244/Llama-3.1-Storm-8B
de2d2321-b6ed-4791-9114-757afc963876
0.0.1
hfopenllm_v2/akjindal53244_Llama-3.1-Storm-8B/1762652579.981211
1762652579.981212
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
akjindal53244/Llama-3.1-Storm-8B
akjindal53244/Llama-3.1-Storm-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8050616807847621}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
akjindal53244/Llama-3.1-Storm-8B
f9aad6f2-ba24-47de-a613-b4011a2c52d1
0.0.1
hfopenllm_v2/akjindal53244_Llama-3.1-Storm-8B/1762652579.980961
1762652579.980962
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
akjindal53244/Llama-3.1-Storm-8B
akjindal53244/Llama-3.1-Storm-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.803263119633683}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH"...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
khoantap/llama-breadcrumbs-ties-merge
9eae434a-fb2a-45b9-a592-f39a9c469f07
0.0.1
hfopenllm_v2/khoantap_llama-breadcrumbs-ties-merge/1762652580.307606
1762652580.307607
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
khoantap/llama-breadcrumbs-ties-merge
khoantap/llama-breadcrumbs-ties-merge
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22051933314716063}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
khoantap/llama-linear-0.5-0.5-1-merge
0906fee9-0edd-494f-bf01-a34711f17596
0.0.1
hfopenllm_v2/khoantap_llama-linear-0.5-0.5-1-merge/1762652580.3081899
1762652580.308191
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
khoantap/llama-linear-0.5-0.5-1-merge
khoantap/llama-linear-0.5-0.5-1-merge
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48122980358781364}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
khoantap/llama-linear-1-0.5-0.5-merge
49e5e4e4-6905-4b9e-9f53-b7ac598b5102
0.0.1
hfopenllm_v2/khoantap_llama-linear-1-0.5-0.5-merge/1762652580.308746
1762652580.308747
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
khoantap/llama-linear-1-0.5-0.5-merge
khoantap/llama-linear-1-0.5-0.5-merge
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45145436331156885}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
khoantap/llama-slerp-merge
e30c2825-6d36-454c-8787-e5cbdfcbcfdf
0.0.1
hfopenllm_v2/khoantap_llama-slerp-merge/1762652580.308971
1762652580.3089721
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
khoantap/llama-slerp-merge
khoantap/llama-slerp-merge
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49799088640363126}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
khoantap/llama-linear-0.5-1-0.5-merge
88d174f6-6d30-4859-bbf0-6f5446ce1b9d
0.0.1
hfopenllm_v2/khoantap_llama-linear-0.5-1-0.5-merge/1762652580.308497
1762652580.308498
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
khoantap/llama-linear-0.5-1-0.5-merge
khoantap/llama-linear-0.5-1-0.5-merge
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5031616111916382}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
khoantap/llama-3-8b-stock-merge
211ac2a5-5bd1-4347-8eb8-fa1bd4b1a5ad
0.0.1
hfopenllm_v2/khoantap_llama-3-8b-stock-merge/1762652580.307331
1762652580.307332
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
khoantap/llama-3-8b-stock-merge
khoantap/llama-3-8b-stock-merge
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48117993590340297}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
khoantap/llama-evolve-ties-best-merge
0ab7f323-1be5-4fc7-a5d8-d4f77f802da3
0.0.1
hfopenllm_v2/khoantap_llama-evolve-ties-best-merge/1762652580.307874
1762652580.3078752
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
khoantap/llama-evolve-ties-best-merge
khoantap/llama-evolve-ties-best-merge
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6743950495795601}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Magpie-Align/Llama-3.1-8B-Magpie-Align-v0.1
80e08062-397f-40d4-b6b2-a3e03d9cc320
0.0.1
hfopenllm_v2/Magpie-Align_Llama-3.1-8B-Magpie-Align-v0.1/1762652579.744737
1762652579.744738
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Magpie-Align/Llama-3.1-8B-Magpie-Align-v0.1
Magpie-Align/Llama-3.1-8B-Magpie-Align-v0.1
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4457838535086903}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Magpie-Align/Llama-3-8B-Magpie-Align-v0.1
ced5680b-ff4a-42be-a609-6fc2541d6109
0.0.1
hfopenllm_v2/Magpie-Align_Llama-3-8B-Magpie-Align-v0.1/1762652579.743867
1762652579.7438679
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Magpie-Align/Llama-3-8B-Magpie-Align-v0.1
Magpie-Align/Llama-3-8B-Magpie-Align-v0.1
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4118117705465941}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Magpie-Align/Llama-3-8B-Magpie-Align-v0.1
c819ae59-5f32-4bba-a835-84fa9497de6b
0.0.1
hfopenllm_v2/Magpie-Align_Llama-3-8B-Magpie-Align-v0.1/1762652579.744125
1762652579.7441258
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Magpie-Align/Llama-3-8B-Magpie-Align-v0.1
Magpie-Align/Llama-3-8B-Magpie-Align-v0.1
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4027192294223771}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Magpie-Align/Llama-3-8B-Magpie-Align-v0.3
f58be76c-043d-4ad9-81df-9a94d380808c
0.0.1
hfopenllm_v2/Magpie-Align_Llama-3-8B-Magpie-Align-v0.3/1762652579.7443142
1762652579.744315
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Magpie-Align/Llama-3-8B-Magpie-Align-v0.3
Magpie-Align/Llama-3-8B-Magpie-Align-v0.3
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44970566984490046}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
kevin009/llamaRAGdrama
41e4d24f-9790-40f5-a915-ee4155d5cbc6
0.0.1
hfopenllm_v2/kevin009_llamaRAGdrama/1762652580.3065941
1762652580.3065941
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
kevin009/llamaRAGdrama
kevin009/llamaRAGdrama
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2598372318780835}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
meta
Replete-AI/Replete-Coder-Llama3-8B
c8b29113-7815-4cf3-be36-76e3e87d6068
0.0.1
hfopenllm_v2/Replete-AI_Replete-Coder-Llama3-8B/1762652579.851821
1762652579.851821
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Replete-AI/Replete-Coder-Llama3-8B
Replete-AI/Replete-Coder-Llama3-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4729362535849324}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Replete-AI/Replete-LLM-V2-Llama-3.1-8b
c3977d28-b18d-4e86-bc69-1aa08422585c
0.0.1
hfopenllm_v2/Replete-AI_Replete-LLM-V2-Llama-3.1-8b/1762652579.8529909
1762652579.852992
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Replete-AI/Replete-LLM-V2-Llama-3.1-8b
Replete-AI/Replete-LLM-V2-Llama-3.1-8b
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5514966954347797}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
TheDrummer/Llama-3SOME-8B-v2
8f4349ad-76e7-4ce5-9121-fef2e376b4bc
0.0.1
hfopenllm_v2/TheDrummer_Llama-3SOME-8B-v2/1762652579.914594
1762652579.9145951
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
TheDrummer/Llama-3SOME-8B-v2
TheDrummer/Llama-3SOME-8B-v2
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4508049752434651}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Nexesenex/Llama_3.1_8b_Hermedive_R1_V1.03
e73d5aee-ad0f-4bec-8230-2087669444bb
0.0.1
hfopenllm_v2/Nexesenex_Llama_3.1_8b_Hermedive_R1_V1.03/1762652579.776387
1762652579.7763882
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nexesenex/Llama_3.1_8b_Hermedive_R1_V1.03
Nexesenex/Llama_3.1_8b_Hermedive_R1_V1.03
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6647528557560606}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Nexesenex/Llama_3.2_3b_Kermes_v2
a3d85774-ddac-436f-9c64-a751d2924bb5
0.0.1
hfopenllm_v2/Nexesenex_Llama_3.2_3b_Kermes_v2/1762652579.781325
1762652579.781326
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nexesenex/Llama_3.2_3b_Kermes_v2
Nexesenex/Llama_3.2_3b_Kermes_v2
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5753766672429155}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.213}
HF Open LLM v2
meta
Nexesenex/Llama_3.2_1b_Odyssea_V1
deb8be23-8976-4dfb-b038-70a4b77de9f6
0.0.1
hfopenllm_v2/Nexesenex_Llama_3.2_1b_Odyssea_V1/1762652579.77868
1762652579.77868
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nexesenex/Llama_3.2_1b_Odyssea_V1
Nexesenex/Llama_3.2_1b_Odyssea_V1
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2552660274737696}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.498}
HF Open LLM v2
meta
Nexesenex/Llama_3.2_1b_AquaSyn_0.11
d3e57fb7-44cb-408a-9ed6-6387b1f0a543
0.0.1
hfopenllm_v2/Nexesenex_Llama_3.2_1b_AquaSyn_0.11/1762652579.778271
1762652579.778271
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nexesenex/Llama_3.2_1b_AquaSyn_0.11
Nexesenex/Llama_3.2_1b_AquaSyn_0.11
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24312601674667658}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.498}
HF Open LLM v2
meta
Nexesenex/Llama_3.1_8b_DodoWild_v2.03
3b2b7ebc-be82-4d7d-8bc8-e718513d164c
0.0.1
hfopenllm_v2/Nexesenex_Llama_3.1_8b_DodoWild_v2.03/1762652579.7746859
1762652579.774687
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nexesenex/Llama_3.1_8b_DodoWild_v2.03
Nexesenex/Llama_3.1_8b_DodoWild_v2.03
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7941207108250552}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Nexesenex/Llama_3.2_1b_AquaSyn_0.1
4b512748-f6d0-4ed0-8ece-5b853a174329
0.0.1
hfopenllm_v2/Nexesenex_Llama_3.2_1b_AquaSyn_0.1/1762652579.7780669
1762652579.778068
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Nexesenex/Llama_3.2_1b_AquaSyn_0.1
Nexesenex/Llama_3.2_1b_AquaSyn_0.1
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2741004977903075}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.498}