Column summary for this section of the dataset:

| Column | Dtype | Values / lengths |
| --- | --- | --- |
| _leaderboard | stringclasses | 1 value |
| _developer | stringclasses | 559 values |
| _model | stringlengths | 9 to 102 |
| _uuid | stringlengths | 36 (fixed) |
| schema_version | stringclasses | 1 value |
| evaluation_id | stringlengths | 35 to 133 |
| retrieved_timestamp | stringlengths | 13 to 18 |
| source_data | stringclasses | 1 value |
| evaluation_source_name | stringclasses | 1 value |
| evaluation_source_type | stringclasses | 1 value |
| source_organization_name | stringclasses | 1 value |
| source_organization_url | null | null |
| source_organization_logo_url | null | null |
| evaluator_relationship | stringclasses | 1 value |
| model_name | stringlengths | 4 to 102 |
| model_id | stringlengths | 9 to 102 |
| model_developer | stringclasses | 559 values |
| model_inference_platform | stringclasses | 1 value |
| evaluation_results | stringlengths | 1.35k to 1.41k |
| additional_details | stringclasses | 660 values |
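Given that schema, here is a minimal sketch of how one might load and decode such rows with the `datasets` library. The repository id below is a placeholder, not the dataset's actual path; note that `evaluation_results` and `additional_details` are JSON-encoded strings, not nested features.

```python
import json
from datasets import load_dataset

# Placeholder repository id; substitute the actual dataset path.
ds = load_dataset("example-org/leaderboard-evals", split="train")

row = ds[0]
print(row["model_id"], row["model_developer"], row["retrieved_timestamp"])

# Decode the JSON-encoded string columns before use.
results = json.loads(row["evaluation_results"])
details = json.loads(row["additional_details"])
print(details["precision"], details["params_billions"])
for entry in results:
    print(entry["evaluation_name"], entry["score_details"]["score"])
```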
All 72 rows shown in this section share the following constant fields:

- _leaderboard and evaluation_source_name: HF Open LLM v2
- _developer and model_developer: meta
- schema_version: 0.0.1
- source_data: ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
- evaluation_source_type: leaderboard
- source_organization_name: Hugging Face (source_organization_url and source_organization_logo_url are null)
- evaluator_relationship: third_party
- model_inference_platform: unknown
- _model and model_name: identical to model_id
- evaluation_id: hfopenllm_v2/<model_id with "/" replaced by "_">/<timestamp>, where the timestamp agrees with retrieved_timestamp to within about a microsecond
- architecture (inside additional_details): LlamaForCausalLM for every row whose additional_details survives the extract, except HiroseKoichi/Llama-Salad-4x8B-V3, which is MixtralForCausalLM

Each evaluation_results value is a JSON-encoded list of per-benchmark entries and is truncated in this extract after the start of the BBH entry. The first row (Nexesenex/Llama_3.1_8b_Dolermed_R1_V1.03) illustrates the shape:

- evaluation_results: [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7564019025075688}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
- additional_details: {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
Per-row fields (the IFEval column is the accuracy visible before each evaluation_results string is cut off; precision and params_billions come from additional_details):

| model_id | _uuid | retrieved_timestamp | IFEval score | precision | params_billions |
| --- | --- | --- | --- | --- | --- |
| Nexesenex/Llama_3.1_8b_Dolermed_R1_V1.03 | b1f9e472-38c5-409f-b112-3006bca90b94 | 1762652579.775334 | 0.7564019025075688 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.2_1b_Sydonia_0.1 | 980cf18c-0163-414c-8ed0-dff894a328ee | 1762652579.780215 | 0.21967047434141412 | bfloat16 | 1.498 |
| Nexesenex/Llama_3.1_8b_Stormeder_v1.04 | e831c8bd-5bdd-4f00-9c91-ab4b29dfc66c | 1762652579.777618 | 0.7852531283660686 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_Dolerstormed_V1.04 | 9d44d069-44b1-414a-93c1-91b46ceabe66 | 1762652579.775746 | 0.7889001183526376 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_DeepDive_3_Prev_v1.0 | 67010272-067a-4dd4-a31d-9da58d72118e | 1762652579.7727098 | 0.6809144181881852 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_Hermedive_V1.01 | 99589a08-8f1e-437e-b6f0-e33a9dab5806 | 1762652579.776602 | 0.5061592131101034 | bfloat16 | 8.031 |
| Nexesenex/Llama_3.2_1b_RandomLego_RP_R1_0.1 | 8254ed33-9ce6-484d-9171-5402156a1933 | 1762652579.779788 | 0.5542693386880144 | bfloat16 | 1.498 |
| Nexesenex/Llama_3.1_8b_DobHerWild_R1_v1.1R | bedae6ba-9f3b-435b-bb7f-cadb7a684804 | 1762652579.7732239 | 0.759999024809727 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_DodoWild_v2.10 | ca49f981-e4eb-4235-b472-de832ffedd72 | 1762652579.7749188 | 0.8053863748188141 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_Hermedive_R1_V1.01 | 82f2d97c-e8d2-47a4-a56b-af781b98ba0b | 1762652579.7761788 | 0.5001141415887622 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_Medusa_v1.01 | 01b841ba-ecb1-4025-91b7-fb2c443ef85c | 1762652579.7770061 | 0.7685419132346618 | bfloat16 | 8.031 |
| Nexesenex/Dolphin3.0-Llama3.1-1B-abliterated | ed950058-9f6b-4ed6-9d41-0d2674dc19d1 | 1762652579.772269 | 0.5311883580012146 | bfloat16 | 1.236 |
| Nexesenex/Llama_3.1_8b_Mediver_V1.01 | 35eb03f0-f11e-40d8-a830-7ce2cfde2956 | 1762652579.776801 | 0.18847103463255274 | bfloat16 | 8.031 |
| Nexesenex/Llama_3.2_1b_Dolto_0.1 | dae3d027-e262-462c-9930-cfee221cef58 | 1762652579.778477 | 0.5433782364127182 | bfloat16 | 1.498 |
| Nexesenex/Llama_3.1_8b_DodoWild_v2.02 | f8448236-89b9-4a9c-949b-9bb45db5e400 | 1762652579.774376 | 0.8016895171478344 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_Dolermed_R1_V1.01 | ca856917-9100-41ea-9900-91d12be1de44 | 1762652579.775127 | 0.7533544329046928 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_DeepDive_3_R1_Prev_v1.0 | 9aa57eda-6d6a-449e-801d-96e16499ddd6 | 1762652579.772984 | 0.7100903380807368 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_Smarteaz_0.2_R1 | 1cbff8d9-a857-4816-8427-0450871021d6 | 1762652579.777212 | 0.6345529860769425 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.2_3b_Kermes_v2.1 | f4686eff-f1d7-49e0-85be-2a6c7f125e29 | 1762652579.781544 | 0.5583906257618674 | bfloat16 | 3.213 |
| Nexesenex/Llama_3.2_1b_Odyssea_V1.01 | f3922129-7e69-495d-925b-c3c8a1b70c5a | 1762652579.7788942 | 0.24954564998648032 | bfloat16 | 1.498 |
| Nexesenex/Llama_3.2_3b_Kermes_v1 | f81acd72-b38a-424a-878b-833d094518da | 1762652579.781108 | 0.4851759996808468 | bfloat16 | 3.213 |
| Nexesenex/Llama_3.1_8b_DoberWild_v2.01 | 8a3df59d-9f38-4682-a760-5fa7903cab99 | 1762652579.7734542 | 0.7995662619627034 | bfloat16 | 8.031 |
| Nexesenex/Llama_3.2_1b_Syneridol_0.2 | 99397e12-f601-478c-af40-c8f428b923a8 | 1762652579.780447 | 0.21574865800520399 | bfloat16 | 1.498 |
| Nexesenex/Llama_3.1_8b_DoberWild_v2.02 | 62ef54cd-d97d-473e-9dd2-42fe185e4d04 | 1762652579.773691 | 0.7746368524404137 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.2_1b_Synopsys_0.1 | 00ccf406-3e59-44cb-af59-6dcd391678ff | 1762652579.780674 | 0.17638089158987041 | bfloat16 | 1.498 |
| Nexesenex/Llama_3.2_1b_OpenTree_R1_0.1 | 11c52cd6-75e0-4800-9b98-fbc4aa81260d | 1762652579.779098 | 0.5366339091388627 | bfloat16 | 1.498 |
| Nexesenex/Llama_3.2_1b_SunOrca_V1 | 848752ff-c92d-4ce2-94e8-5b8c8b765b77 | 1762652579.780006 | 0.542953807009845 | bfloat16 | 1.498 |
| Nexesenex/Llama_3.1_8b_DodoWild_v2.01 | 78ecc0f4-dcd5-4c25-a598-ef95114f5868 | 1762652579.7741492 | 0.7977677008116243 | bfloat16 | 8.031 |
| Nexesenex/Llama_3.1_8b_Typhoon_v1.03 | 6043c193-a533-4194-8cf5-9ed83d095f0d | 1762652579.7778208 | 0.8078343240379969 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_DoberWild_v2.03 | b81cbefe-7c08-4bc2-979f-10caf20fa9fa | 1762652579.77393 | 0.7764354135914928 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_Dolermed_V1.01 | 4733fd17-2d7a-44cd-83bf-1201a3173495 | 1762652579.775538 | 0.508657030013697 | bfloat16 | 8.031 |
| Nexesenex/Llama_3.2_1b_Synopsys_0.11 | 6e4a0c11-2349-4846-9d9b-ccf6ef9ea43a | 1762652579.780886 | 0.28421698870109086 | bfloat16 | 1.498 |
| Nexesenex/Llama_3.2_1b_OrcaSun_V1 | dd17eeb9-c1d1-4f98-986e-aad15a592891 | 1762652579.779478 | 0.5948605256275571 | bfloat16 | 1.498 |
| Nexesenex/Llama_3.1_8b_Smarteaz_V1.01 | 10cc1ce1-986e-44f5-b14e-a7b44d9de68d | 1762652579.777418 | 0.8151283040111349 | bfloat16 | 8.03 |
| Nexesenex/Llama_3.1_8b_Hermedash_R1_V1.04 | 615e5bca-6f64-4bf9-a131-eefd7ec32c08 | 1762652579.775958 | 0.7871514248859692 | bfloat16 | 8.03 |
| TinyLlama/TinyLlama_v1.1 | e81db661-b05a-4d95-8be4-d663317d3d13 | 1762652579.9198568 | 0.20006139266036338 | bfloat16 | 1.1 |
| TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T | 99c5044d-1308-4f30-9413-bc2672545f76 | 1762652579.919578 | 0.22766371006706648 | bfloat16 | 1.1 |
| refuelai/Llama-3-Refueled | 2f104869-3a3b-4d25-987b-77dba089b817 | 1762652580.494147 | 0.4619952836252255 | bfloat16 | 8.03 |
| gbueno86/Meta-LLama-3-Cat-Smaug-LLama-70b | 9b7181ec-81f6-438a-8af6-a219f356f430 | 1762652580.1641128 | 0.8071849359698933 | bfloat16 | 70.554 |
| gbueno86/Brinebreath-Llama-3.1-70B | 12e0e194-ef37-4da5-9354-e82f983fadb2 | 1762652580.163834 | 0.5532952565858589 | bfloat16 | 70.554 |
| grimjim/llama-3-Nephilim-v2.1-8B | df6327cf-82e1-437f-9c9a-c31205452717 | 1762652580.186715 | 0.38950540122430705 | bfloat16 | 8.03 |
| grimjim/SauerHuatuoSkywork-o1-Llama-3.1-8B | 30482674-45a3-4400-84e0-eef215540eb5 | 1762652580.1860962 | 0.5219462138237654 | bfloat16 | 8.03 |
| grimjim/DeepSauerHuatuoSkywork-R1-o1-Llama-3.1-8B | f7439085-a0c9-4d5b-bd4f-bf1841d5ce02 | 1762652580.18165 | 0.4797060687863757 | bfloat16 | 8.03 |
| grimjim/llama-3-Nephilim-v1-8B | 498c4d5e-0500-42da-9c75-e8da578516f8 | 1762652580.186312 | 0.4277239945566652 | bfloat16 | 8.03 |
| grimjim/Llama-Nephilim-Metamorphosis-v2-8B | ac20706b-0370-47de-bc6b-ae188f8a9259 | 1762652580.1836832 | 0.4544519652300341 | bfloat16 | 8.03 |
| grimjim/Llama3.1-SuperNovaLite-HuatuoSkywork-o1-8B | f2fbc411-4a4b-4727-9fdc-eda481f4f10c | 1762652580.183897 | 0.43659157701565177 | bfloat16 | 8.03 |
| grimjim/llama-3-Nephilim-v3-8B | ecee6e6a-15a1-4455-9724-34ca14477064 | 1762652580.186965 | 0.4173825449806513 | bfloat16 | 8.03 |
| grimjim/HuatuoSkywork-o1-Llama-3.1-8B | 6a173156-75b3-47f4-9f88-ecace0ee6942 | 1762652580.182574 | 0.3961499931293413 | bfloat16 | 8.03 |
| grimjim/llama-3-Nephilim-v2-8B | de82dcd9-adae-4b28-8248-156e324e036d | 1762652580.1865118 | 0.39222817679313116 | bfloat16 | 8.03 |
| viettelsecurity-ai/security-llama3.2-3b | 2176e0d8-e0a5-4118-b15f-b272dc643d89 | 1762652580.588792 | 0.5908888416069362 | float16 | 3.213 |
| Eurdem/Defne-llama3.1-8B | 52eb695b-3d17-4abe-a386-7927348e5dd5 | 1762652579.615499 | 0.5036115285220991 | bfloat16 | 8.03 |
| abhishek/autotrain-llama3-orpo-v2 | f8515d35-c7e8-440b-a61f-16f5acfdc003 | 1762652579.973501 | 0.4371656094717572 | bfloat16 | 8.03 |
| abhishek/autotrain-llama3-70b-orpo-v2 | 15617903-e280-4c61-a326-5f615b46b3a8 | 1762652579.973275 | 0.5406055931594835 | float16 | 70.554 |
| abhishek/autotrain-llama3-70b-orpo-v1 | eb2ee4fb-cc98-4937-a385-19a5e783d1a7 | 1762652579.973003 | 0.4233023932055834 | float16 | 70.554 |
| deepseek-ai/DeepSeek-R1-Distill-Llama-8B | 650f54ba-4d43-4e31-92cd-16c7c1913b34 | 1762652580.121734 | 0.37823973723054827 | bfloat16 | 8.03 |
| deepseek-ai/DeepSeek-R1-Distill-Llama-70B | 8df04772-fc5c-4dfb-8366-f9844bf52a0e | 1762652580.12145 | 0.43359397509718656 | bfloat16 | 70.554 |
| mmnga/Llama-3-70B-japanese-suzume-vector-v0.1 | 56f52103-ea5e-4228-ac7b-3c6929fe5b76 | 1762652580.370962 | 0.4648931501748693 | bfloat16 | 70.554 |
| SentientAGI/Dobby-Mini-Leashed-Llama-3.1-8B | ed1798c0-348f-4294-b546-8a7892225d33 | 1762652579.878996 | 0.7847034756667863 | bfloat16 | 8.03 |
| SentientAGI/Dobby-Mini-Unhinged-Llama-3.1-8B | 6ac51916-9278-46b6-9b0f-059745f3d845 | 1762652579.879248 | 0.7456858912130924 | bfloat16 | 8.03 |
| Sicarius-Prototyping/Brainy_LLAMA | 83fd7abf-00b0-4242-b8c3-87ef9c40dfcf | 1762652579.8804932 | 0.5204224790223274 | float16 | 8.03 |
| HiroseKoichi/Llama-Salad-4x8B-V3 | 69037dce-5276-4e26-aa05-0a7bd2c4739b | 1762652579.6402519 | 0.6653523761397536 | bfloat16 | 24.942 |
| meta-llama/Meta-Llama-3-8B | 75f6ae05-a987-455d-8167-fc345d55c370 | 1762652580.352957 | 0.14550614591506092 | bfloat16 | 8.03 |
| meta-llama/Llama-2-7b-hf | 36fbd2e7-97fa-4ba4-aad2-47bfc225771d | 1762652580.350466 | 0.2518938638368418 | float16 | 6.738 |
| meta-llama/Llama-3.2-3B | 19aba348-6bdd-425a-bd7b-505aa2658f6c | 1762652580.351925 | 0.13374069690643048 | bfloat16 | 3.213 |
| meta-llama/Llama-2-70b-hf | 70acb3cd-fea6-481a-8bf4-fa72e953c110 | 1762652580.3500118 | 0.2406780675274937 | float16 | 68.977 |
| meta-llama/Llama-2-13b-hf | 7a0c1d3a-26f5-44d0-8ca1-8ce6db39cb99 | 1762652580.349382 | 0.24824687385027283 | float16 | 13.016 |
| meta-llama/Meta-Llama-3-70B | dddadaa0-6808-4b34-a6e2-29663460c3e0 | 1762652580.352541 | 0.1603190645265673 | bfloat16 | 70.554 |
| meta-llama/Llama-3.1-70B | 88d33049-cd88-4b4a-94ba-d0c35a635cfc | 1762652580.350682 | 0.16843752354862876 | bfloat16 | 70.554 |
| meta-llama/Llama-3.1-8B | 58e87619-6244-45b9-8a1f-b2f8f0d0cd31 | 1762652580.351093 | 0.12459828809780273 | float16 | 8.03 |
| meta-llama/Llama-3.2-1B | b4b6a8d2-be7f-4b8f-b280-3e62015a61d3 | 1762652580.351506 | 0.14777900415342402 | bfloat16 | 1.24 |
| BrainWave-ML/llama3.2-3B-maths-orpo | 979ef5b7-12cb-4e4d-81c7-9e6fcb1d6cef | 1762652579.49941 | 0.20490742341431845 | float16 | 3.0 |
| fulim/FineLlama-3.1-8B | 46fa0a20-2810-4f0b-befe-afc3fc774734 | 1762652580.162705 | 0.14388267574480157 | (cut off) | (cut off) |

The additional_details field of the final row (fulim/FineLlama-3.1-8B) is cut off in the source extract, so its precision and parameter count are not recoverable here.
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.0}
HF Open LLM v2
meta
PJMixers/LLaMa-3-CursedStock-v2.0-8B
4f7c69a5-70e5-4f7b-9520-9fa9e642df57
0.0.1
hfopenllm_v2/PJMixers_LLaMa-3-CursedStock-v2.0-8B/1762652579.809348
1762652579.809348
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
PJMixers/LLaMa-3-CursedStock-v2.0-8B
PJMixers/LLaMa-3-CursedStock-v2.0-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6330791189599152}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
migtissera/Llama-3-70B-Synthia-v3.5
7ba5e7cb-3050-4838-8762-4b31a5c9d912
0.0.1
hfopenllm_v2/migtissera_Llama-3-70B-Synthia-v3.5/1762652580.358073
1762652580.3580742
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
migtissera/Llama-3-70B-Synthia-v3.5
migtissera/Llama-3-70B-Synthia-v3.5
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6076499244227538}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 70.554}
HF Open LLM v2
meta
migtissera/Llama-3-8B-Synthia-v3.5
3c843cd0-ce71-4feb-9452-65fc7534518e
0.0.1
hfopenllm_v2/migtissera_Llama-3-8B-Synthia-v3.5/1762652580.358322
1762652580.358322
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
migtissera/Llama-3-8B-Synthia-v3.5
migtissera/Llama-3-8B-Synthia-v3.5
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5069582042314393}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-half
90ab1587-99b9-48e1-b3f3-8aaf07313eaa
0.0.1
hfopenllm_v2/lightblue_suzume-llama-3-8B-multilingual-orpo-borda-half/1762652580.3218
1762652580.321801
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-half
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-half
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6249107922534431}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-full
37aa2a50-974f-4cb0-81e3-f160f08c8a0e
0.0.1
hfopenllm_v2/lightblue_suzume-llama-3-8B-multilingual-orpo-borda-full/1762652580.32158
1762652580.32158
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-full
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-full
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5817464327983085}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top25
ebfb14c0-d725-4650-9d04-ed4f7ebaf676
0.0.1
hfopenllm_v2/lightblue_suzume-llama-3-8B-multilingual-orpo-borda-top25/1762652580.322012
1762652580.322013
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top25
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top25
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6636535503574958}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top75
fcb13fe4-e314-4cdd-ae6e-82531ad6a829
0.0.1
hfopenllm_v2/lightblue_suzume-llama-3-8B-multilingual-orpo-borda-top75/1762652580.322237
1762652580.322238
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top75
lightblue/suzume-llama-3-8B-multilingual-orpo-borda-top75
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6687245397766814}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
lightblue/suzume-llama-3-8B-multilingual
8eaee9b3-78b0-4523-9151-695c27c5cfa7
0.0.1
hfopenllm_v2/lightblue_suzume-llama-3-8B-multilingual/1762652580.321283
1762652580.321284
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
lightblue/suzume-llama-3-8B-multilingual
lightblue/suzume-llama-3-8B-multilingual
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6678003253589365}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
BEE-spoke-data/smol_llama-220M-GQA-fineweb_edu
03c78dad-b50d-4f80-91f8-bd8fbb87235d
0.0.1
hfopenllm_v2/BEE-spoke-data_smol_llama-220M-GQA-fineweb_edu/1762652579.492168
1762652579.492168
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
BEE-spoke-data/smol_llama-220M-GQA-fineweb_edu
BEE-spoke-data/smol_llama-220M-GQA-fineweb_edu
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19881248420856662}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.218}
HF Open LLM v2
meta
BEE-spoke-data/smol_llama-220M-GQA
26596bba-b99d-417f-87be-91de8fa528d3
0.0.1
hfopenllm_v2/BEE-spoke-data_smol_llama-220M-GQA/1762652579.491959
1762652579.49196
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
BEE-spoke-data/smol_llama-220M-GQA
BEE-spoke-data/smol_llama-220M-GQA
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23860468002677343}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.218}
HF Open LLM v2
meta
BEE-spoke-data/smol_llama-220M-openhermes
a0de28f1-8186-4eef-b5b4-ce6da71d8271
0.0.1
hfopenllm_v2/BEE-spoke-data_smol_llama-220M-openhermes/1762652579.4923809
1762652579.492382
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
BEE-spoke-data/smol_llama-220M-openhermes
BEE-spoke-data/smol_llama-220M-openhermes
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1555229014570229}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.218}
HF Open LLM v2
meta
BEE-spoke-data/smol_llama-101M-GQA
3c1f129b-4f54-4187-876b-c93942179125
0.0.1
hfopenllm_v2/BEE-spoke-data_smol_llama-101M-GQA/1762652579.491745
1762652579.491746
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
BEE-spoke-data/smol_llama-101M-GQA
BEE-spoke-data/smol_llama-101M-GQA
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.13843712460715346}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.101}
HF Open LLM v2
meta
BEE-spoke-data/Meta-Llama-3-8Bee
ae5f1f84-091a-4f80-ae40-92ada7e04f94
0.0.1
hfopenllm_v2/BEE-spoke-data_Meta-Llama-3-8Bee/1762652579.491223
1762652579.491224
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
BEE-spoke-data/Meta-Llama-3-8Bee
BEE-spoke-data/Meta-Llama-3-8Bee
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19506575885317623}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
EpistemeAI/Fireball-R1.1-Llama-3.1-8B
5938f7d8-dddb-4989-81c6-e57e177e52c9
0.0.1
hfopenllm_v2/EpistemeAI_Fireball-R1.1-Llama-3.1-8B/1762652579.604102
1762652579.604102
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
EpistemeAI/Fireball-R1.1-Llama-3.1-8B
EpistemeAI/Fireball-R1.1-Llama-3.1-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3676234613048932}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
EpistemeAI/Alpaca-Llama3.1-8B
cd4698d8-e9d0-4a00-855a-6e0b9cfc31d8
0.0.1
hfopenllm_v2/EpistemeAI_Alpaca-Llama3.1-8B/1762652579.5979578
1762652579.5979588
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
EpistemeAI/Alpaca-Llama3.1-8B
EpistemeAI/Alpaca-Llama3.1-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15986914719610634}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.0}
HF Open LLM v2
meta
EpistemeAI/Llama-3.2-3B-Agent007-Coder
ab812077-8d2b-40f8-bc49-65fffd7f6f26
0.0.1
hfopenllm_v2/EpistemeAI_Llama-3.2-3B-Agent007-Coder/1762652579.6043148
1762652579.6043148
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
EpistemeAI/Llama-3.2-3B-Agent007-Coder
EpistemeAI/Llama-3.2-3B-Agent007-Coder
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5399562050913798}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.0}
HF Open LLM v2
meta
EpistemeAI/Fireball-Alpaca-Llama3.1.08-8B-Philos-C-R2
60d939fa-9ae2-4226-a955-d586c27fea68
0.0.1
hfopenllm_v2/EpistemeAI_Fireball-Alpaca-Llama3.1.08-8B-Philos-C-R2/1762652579.600828
1762652579.600829
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
EpistemeAI/Fireball-Alpaca-Llama3.1.08-8B-Philos-C-R2
EpistemeAI/Fireball-Alpaca-Llama3.1.08-8B-Philos-C-R2
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46731561146646455}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.0}
HF Open LLM v2
meta
EpistemeAI/Fireball-R1-Llama-3.1-8B
85ff1b65-eade-4d70-a278-99605f324e5a
0.0.1
hfopenllm_v2/EpistemeAI_Fireball-R1-Llama-3.1-8B/1762652579.603668
1762652579.603669
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
EpistemeAI/Fireball-R1-Llama-3.1-8B
EpistemeAI/Fireball-R1-Llama-3.1-8B
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4427363839058143}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
EpistemeAI/OpenReasoner-Llama-3.2-3B-rs1.0
610f3053-b2a9-45a8-ac09-af3edcb8c826
0.0.1
hfopenllm_v2/EpistemeAI_OpenReasoner-Llama-3.2-3B-rs1.0/1762652579.604741
1762652579.6047418
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
EpistemeAI/OpenReasoner-Llama-3.2-3B-rs1.0
EpistemeAI/OpenReasoner-Llama-3.2-3B-rs1.0
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7274010735958367}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.213}
HF Open LLM v2
meta
EpistemeAI/Reasoning-Llama-3.1-CoT-RE1-NMT-V2-ORPO
14560449-0481-4346-aab2-ff75fdab691b
0.0.1
hfopenllm_v2/EpistemeAI_Reasoning-Llama-3.1-CoT-RE1-NMT-V2-ORPO/1762652579.606164
1762652579.606165
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
EpistemeAI/Reasoning-Llama-3.1-CoT-RE1-NMT-V2-ORPO
EpistemeAI/Reasoning-Llama-3.1-CoT-RE1-NMT-V2-ORPO
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4553263119633683}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
EpistemeAI/Fireball-Alpaca-Llama3.1.07-8B-Philos-Math-KTO-beta
88e9cdd1-ad46-4ad0-9e9b-d872cdb63257
0.0.1
hfopenllm_v2/EpistemeAI_Fireball-Alpaca-Llama3.1.07-8B-Philos-Math-KTO-beta/1762652579.600618
1762652579.600619
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
EpistemeAI/Fireball-Alpaca-Llama3.1.07-8B-Philos-Math-KTO-beta
EpistemeAI/Fireball-Alpaca-Llama3.1.07-8B-Philos-Math-KTO-beta
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7274010735958367}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.0}
HF Open LLM v2
meta
EpistemeAI/Fireball-R1-Llama-3.1-8B-Medical-COT
1bfd3789-e95b-487c-9c8a-516c017f6558
0.0.1
hfopenllm_v2/EpistemeAI_Fireball-R1-Llama-3.1-8B-Medical-COT/1762652579.603883
1762652579.603883
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
EpistemeAI/Fireball-R1-Llama-3.1-8B-Medical-COT
EpistemeAI/Fireball-R1-Llama-3.1-8B-Medical-COT
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3216111029845255}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
EpistemeAI/Reasoning-Llama-3.1-CoT-RE1-NMT
807ed760-775e-4082-90ea-7b524038bebf
0.0.1
hfopenllm_v2/EpistemeAI_Reasoning-Llama-3.1-CoT-RE1-NMT/1762652579.6059399
1762652579.605941
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
EpistemeAI/Reasoning-Llama-3.1-CoT-RE1-NMT
EpistemeAI/Reasoning-Llama-3.1-CoT-RE1-NMT
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4828532737580731}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
cstr/llama3.1-8b-spaetzle-v90
73270182-a54d-4fc5-834a-89283677c1af
0.0.1
hfopenllm_v2/cstr_llama3.1-8b-spaetzle-v90/1762652580.117986
1762652580.1179872
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
cstr/llama3.1-8b-spaetzle-v90
cstr/llama3.1-8b-spaetzle-v90
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7356192679867197}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Kukedlc/NeuralLLaMa-3-8b-DT-v0.1
ec1bea6a-91e2-41c9-ab54-af84bf1a1d15
0.0.1
hfopenllm_v2/Kukedlc_NeuralLLaMa-3-8b-DT-v0.1/1762652579.7021902
1762652579.702191
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Kukedlc/NeuralLLaMa-3-8b-DT-v0.1
Kukedlc/NeuralLLaMa-3-8b-DT-v0.1
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4371412297149342}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
Kukedlc/NeuralLLaMa-3-8b-ORPO-v0.3
02d060d9-d545-445b-8d22-4ae117b8f324
0.0.1
hfopenllm_v2/Kukedlc_NeuralLLaMa-3-8b-ORPO-v0.3/1762652579.7024388
1762652579.70244
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Kukedlc/NeuralLLaMa-3-8b-ORPO-v0.3
Kukedlc/NeuralLLaMa-3-8b-ORPO-v0.3
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5275912356990563}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
meta
IntervitensInc/internlm2_5-20b-llamafied
5be7b084-b018-457a-a5d6-c9e3e9d3f70e
0.0.1
hfopenllm_v2/IntervitensInc_internlm2_5-20b-llamafied/1762652579.6480021
1762652579.648003
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
IntervitensInc/internlm2_5-20b-llamafied
IntervitensInc/internlm2_5-20b-llamafied
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3409952260003457}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 19.861}
HF Open LLM v2
meta
cluebbers/Llama-3.1-8B-paraphrase-type-generation-apty-ipo
e89bbd89-f8fa-4156-94d8-6f390a383557
0.0.1
hfopenllm_v2/cluebbers_Llama-3.1-8B-paraphrase-type-generation-apty-ipo/1762652580.109549
1762652580.1095521
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
cluebbers/Llama-3.1-8B-paraphrase-type-generation-apty-ipo
cluebbers/Llama-3.1-8B-paraphrase-type-generation-apty-ipo
meta
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1326668794354535}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}