| Column | Dtype | Values / lengths |
|---|---|---|
| _leaderboard | stringclasses | 1 value |
| _developer | stringclasses | 559 values |
| _model | stringlengths | 9 to 102 chars |
| _uuid | stringlengths | 36 chars |
| schema_version | stringclasses | 1 value |
| evaluation_id | stringlengths | 35 to 133 chars |
| retrieved_timestamp | stringlengths | 13 to 18 chars |
| source_data | stringclasses | 1 value |
| evaluation_source_name | stringclasses | 1 value |
| evaluation_source_type | stringclasses | 1 value |
| source_organization_name | stringclasses | 1 value |
| source_organization_url | null | – |
| source_organization_logo_url | null | – |
| evaluator_relationship | stringclasses | 1 value |
| model_name | stringlengths | 4 to 102 chars |
| model_id | stringlengths | 9 to 102 chars |
| model_developer | stringclasses | 559 values |
| model_inference_platform | stringclasses | 1 value |
| evaluation_results | stringlengths | 1.35k to 1.41k chars |
| additional_details | stringclasses | 660 values |
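The twenty columns above describe one flat record per leaderboard entry, and the rows that follow list those fields in the same order. A minimal sketch of loading and inspecting such a split with the `datasets` library is below; the repository id `your-org/hf-open-llm-v2-dump` is a placeholder assumption, not the actual dataset path.

```python
# Minimal sketch: load the dump and inspect its columns.
# Assumption: the data is published as a Hugging Face dataset; the repo id
# below is a placeholder, not the real one.
from datasets import load_dataset

ds = load_dataset("your-org/hf-open-llm-v2-dump", split="train")  # placeholder repo id

# The feature names should match the column table above
# (_leaderboard, _developer, ..., evaluation_results, additional_details).
print(ds.column_names)
print(ds[0]["_model"], ds[0]["model_developer"])
```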
HF Open LLM v2
microsoft
theprint/phi-3-mini-4k-python
f017d759-59fe-42a3-947d-a4b787f084d7
0.0.1
hfopenllm_v2/theprint_phi-3-mini-4k-python/1762652580.5645702
1762652580.564571
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
theprint/phi-3-mini-4k-python
theprint/phi-3-mini-4k-python
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24087753826513653}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "?", "params_billions": 4.132}
HF Open LLM v2
microsoft
microsoft/Phi-3.5-mini-instruct
42448d73-f9e0-4eb2-bd6a-74614d08d55c
0.0.1
hfopenllm_v2/microsoft_Phi-3.5-mini-instruct/1762652580.356627
1762652580.356628
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
microsoft/Phi-3.5-mini-instruct
microsoft/Phi-3.5-mini-instruct
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5774500547436359}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
HF Open LLM v2
microsoft
EpistemeAI2/Fireball-Phi-3-medium-4k-inst-Philos
a60477a1-b815-4c82-a9e9-f017cb7b5ec9
0.0.1
hfopenllm_v2/EpistemeAI2_Fireball-Phi-3-medium-4k-inst-Philos/1762652579.612791
1762652579.612792
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
EpistemeAI2/Fireball-Phi-3-medium-4k-inst-Philos
EpistemeAI2/Fireball-Phi-3-medium-4k-inst-Philos
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5312880933700359}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 13.96}
HF Open LLM v2
microsoft
1024m/PHI-4-Hindi
29f2c6ef-0685-43f9-800b-4f10ddc3ddf7
0.0.1
hfopenllm_v2/1024m_PHI-4-Hindi/1762652579.468371
1762652579.4683719
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
1024m/PHI-4-Hindi
1024m/PHI-4-Hindi
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.00816832670647216}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
HF Open LLM v2
microsoft
mlabonne/phixtral-2x2_8
ec051c9b-9399-4c8d-8710-6a182a234890
0.0.1
hfopenllm_v2/mlabonne_phixtral-2x2_8/1762652580.370162
1762652580.370163
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
mlabonne/phixtral-2x2_8
mlabonne/phixtral-2x2_8
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3431184811854767}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "PhiForCausalLM", "params_billions": 4.458}
HF Open LLM v2
microsoft
Quazim0t0/CoT_Phi
ed579ba1-fcd3-4279-ac93-d0340a771e43
0.0.1
hfopenllm_v2/Quazim0t0_CoT_Phi/1762652579.820767
1762652579.820768
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Quazim0t0/CoT_Phi
Quazim0t0/CoT_Phi
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6158681188136367}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
HF Open LLM v2
microsoft
Quazim0t0/Math_Phi4_Reason
1c2a87ca-9f1a-4d32-b1da-743927b722b0
0.0.1
hfopenllm_v2/Quazim0t0_Math_Phi4_Reason/1762652579.826147
1762652579.826147
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Quazim0t0/Math_Phi4_Reason
Quazim0t0/Math_Phi4_Reason
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3220111526305758}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
HF Open LLM v2
microsoft
Quazim0t0/Lo-Phi-14b
b37d3d27-5ba0-44d6-bd19-1196a98b75b4
0.0.1
hfopenllm_v2/Quazim0t0_Lo-Phi-14b/1762652579.825307
1762652579.8253078
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Quazim0t0/Lo-Phi-14b
Quazim0t0/Lo-Phi-14b
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4941189377518318}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
HF Open LLM v2
microsoft
Quazim0t0/ThinkPhi1.1-Tensors
056e62d9-ab3e-4bf3-8693-47a5aea7f84f
0.0.1
hfopenllm_v2/Quazim0t0_ThinkPhi1.1-Tensors/1762652579.831269
1762652579.831269
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Quazim0t0/ThinkPhi1.1-Tensors
Quazim0t0/ThinkPhi1.1-Tensors
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3907543096761038}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
HF Open LLM v2
microsoft
Quazim0t0/Phi4.Turn.R1Distill.16bit
44749932-f3e3-45ad-bb4b-135a6d656e3b
0.0.1
hfopenllm_v2/Quazim0t0_Phi4.Turn.R1Distill.16bit/1762652579.8283992
1762652579.8283992
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Quazim0t0/Phi4.Turn.R1Distill.16bit
Quazim0t0/Phi4.Turn.R1Distill.16bit
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31264378515671754}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
HF Open LLM v2
microsoft
Quazim0t0/Phi4Basis-14B-sce
d101111a-31bd-4eec-9a53-52543f6d5fd5
0.0.1
hfopenllm_v2/Quazim0t0_Phi4Basis-14B-sce/1762652579.828811
1762652579.8288121
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Quazim0t0/Phi4Basis-14B-sce
Quazim0t0/Phi4Basis-14B-sce
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6501648958097848}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
HF Open LLM v2
microsoft
Quazim0t0/graphite-14b-sce
bd98b886-a899-4022-aee4-09ea0e491fe3
0.0.1
hfopenllm_v2/Quazim0t0_graphite-14b-sce/1762652579.833386
1762652579.833387
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Quazim0t0/graphite-14b-sce
Quazim0t0/graphite-14b-sce
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3216864585965239}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
HF Open LLM v2
microsoft
Quazim0t0/Phi4.Turn.R1Distill_v1.5.1-Tensors
5f1b91c8-28d0-4274-8979-32416003fafb
0.0.1
hfopenllm_v2/Quazim0t0_Phi4.Turn.R1Distill_v1.5.1-Tensors/1762652579.8286002
1762652579.8286011
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Quazim0t0/Phi4.Turn.R1Distill_v1.5.1-Tensors
Quazim0t0/Phi4.Turn.R1Distill_v1.5.1-Tensors
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2995296923274689}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
HF Open LLM v2
microsoft
microsoft/Phi-3-mini-128k-instruct
0bcfeb34-8944-4f16-83d8-6fe851c39af6
0.0.1
hfopenllm_v2/microsoft_Phi-3-mini-128k-instruct/1762652580.355347
1762652580.3553479
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
microsoft/Phi-3-mini-128k-instruct
microsoft/Phi-3-mini-128k-instruct
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5976331688807919}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
HF Open LLM v2
microsoft
pankajmathur/orca_mini_phi-4
f5971ede-de93-4729-8a03-b9ec3abea21e
0.0.1
hfopenllm_v2/pankajmathur_orca_mini_phi-4/1762652580.435327
1762652580.435328
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
pankajmathur/orca_mini_phi-4
pankajmathur/orca_mini_phi-4
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7780588837617521}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
HF Open LLM v2
microsoft
ehristoforu/phi-4-25b
d11d7e47-f9e0-4502-9e71-0654819c3cd4
0.0.1
hfopenllm_v2/ehristoforu_phi-4-25b/1762652580.144644
1762652580.1446452
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ehristoforu/phi-4-25b
ehristoforu/phi-4-25b
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6483663346587056}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Phi3ForCausalLM", "params_billions": 24.883}
HF Open LLM v2
microsoft
ehristoforu/ruphi-4b
70337ca5-7810-4e52-8382-0c2568a6ab70
0.0.1
hfopenllm_v2/ehristoforu_ruphi-4b/1762652580.1457548
1762652580.145756
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ehristoforu/ruphi-4b
ehristoforu/ruphi-4b
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17518185082248433}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
HF Open LLM v2
microsoft
Daemontatox/TinySphinx2.0
da5d131c-5ae9-462e-87b1-92ead75eddb9
0.0.1
hfopenllm_v2/Daemontatox_TinySphinx2.0/1762652579.531743
1762652579.531744
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Daemontatox/TinySphinx2.0
Daemontatox/TinySphinx2.0
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25351733400710114}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 0.247}
HF Open LLM v2
microsoft
Daemontatox/Phi-4-COT
4ab23cde-aadb-424d-a88e-e7029a2f5c57
0.0.1
hfopenllm_v2/Daemontatox_Phi-4-COT/1762652579.5296152
1762652579.5296159
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Daemontatox/Phi-4-COT
Daemontatox/Phi-4-COT
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17930313789633728}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 14.66}
HF Open LLM v2
microsoft
Daemontatox/Sphinx2.0
07d85f99-840b-403a-bace-99712f3469b7
0.0.1
hfopenllm_v2/Daemontatox_Sphinx2.0/1762652579.531323
1762652579.531324
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Daemontatox/Sphinx2.0
Daemontatox/Sphinx2.0
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7123133286346892}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.77}
HF Open LLM v2
microsoft
Daemontatox/TinySphinx
6d501ffa-e205-4522-9af5-7036463a5b05
0.0.1
hfopenllm_v2/Daemontatox_TinySphinx/1762652579.5315351
1762652579.5315359
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Daemontatox/TinySphinx
Daemontatox/TinySphinx
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2566900269063862}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 0.247}
HF Open LLM v2
microsoft
Daemontatox/SphinX
118ee97a-cc78-4b4d-99c4-58d37b4a48ba
0.0.1
hfopenllm_v2/Daemontatox_SphinX/1762652579.531104
1762652579.531104
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Daemontatox/SphinX
Daemontatox/SphinX
microsoft
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5725042886208593}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
HF Open LLM v2
PygmalionAI
PygmalionAI/pygmalion-6b
7cdfef58-c871-4158-b97d-ed843f7d667b
0.0.1
hfopenllm_v2/PygmalionAI_pygmalion-6b/1762652579.818316
1762652579.8183172
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
PygmalionAI/pygmalion-6b
PygmalionAI/pygmalion-6b
PygmalionAI
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.20910406610016974}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "GPTJForCausalLM", "params_billions": 6.0}
HF Open LLM v2
sonthenguyen
sonthenguyen/zephyr-sft-bnb-4bit-DPO-mtbr-180steps
9fa1bbeb-ec5c-4d53-b2f3-eefa660bee5e
0.0.1
hfopenllm_v2/sonthenguyen_zephyr-sft-bnb-4bit-DPO-mtbr-180steps/1762652580.5327501
1762652580.532751
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
sonthenguyen/zephyr-sft-bnb-4bit-DPO-mtbr-180steps
sonthenguyen/zephyr-sft-bnb-4bit-DPO-mtbr-180steps
sonthenguyen
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4032190144372487}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
sonthenguyen
sonthenguyen/zephyr-sft-bnb-4bit-DPO-mtbc-213steps
aabf8b57-c3fd-494b-b8e3-7ff1bdb0a15b
0.0.1
hfopenllm_v2/sonthenguyen_zephyr-sft-bnb-4bit-DPO-mtbc-213steps/1762652580.532313
1762652580.532314
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
sonthenguyen/zephyr-sft-bnb-4bit-DPO-mtbc-213steps
sonthenguyen/zephyr-sft-bnb-4bit-DPO-mtbc-213steps
sonthenguyen
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4275489035758454}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
sonthenguyen
sonthenguyen/ft-unsloth-zephyr-sft-bnb-4bit-20241014-170522
1e66ee5b-d3e7-4e2e-8a6f-d098938d4afd
0.0.1
hfopenllm_v2/sonthenguyen_ft-unsloth-zephyr-sft-bnb-4bit-20241014-170522/1762652580.532109
1762652580.53211
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
sonthenguyen/ft-unsloth-zephyr-sft-bnb-4bit-20241014-170522
sonthenguyen/ft-unsloth-zephyr-sft-bnb-4bit-20241014-170522
sonthenguyen
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37644117607946914}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "?", "params_billions": 7.723}
HF Open LLM v2
sonthenguyen
sonthenguyen/zephyr-sft-bnb-4bit-DPO-mtbo-180steps
dd216882-a64e-4a0e-8fdc-ff5f99639566
0.0.1
hfopenllm_v2/sonthenguyen_zephyr-sft-bnb-4bit-DPO-mtbo-180steps/1762652580.532533
1762652580.5325341
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
sonthenguyen/zephyr-sft-bnb-4bit-DPO-mtbo-180steps
sonthenguyen/zephyr-sft-bnb-4bit-DPO-mtbo-180steps
sonthenguyen
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40871443325930756}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
sonthenguyen
sonthenguyen/ft-unsloth-zephyr-sft-bnb-4bit-20241014-164205
c9e9de59-9ec8-4ca9-8869-f77cac14f3ed
0.0.1
hfopenllm_v2/sonthenguyen_ft-unsloth-zephyr-sft-bnb-4bit-20241014-164205/1762652580.531905
1762652580.5319061
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
sonthenguyen/ft-unsloth-zephyr-sft-bnb-4bit-20241014-164205
sonthenguyen/ft-unsloth-zephyr-sft-bnb-4bit-20241014-164205
sonthenguyen
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3199377651298555}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "?", "params_billions": 7.723}
HF Open LLM v2
sonthenguyen
sonthenguyen/ft-unsloth-zephyr-sft-bnb-4bit-20241014-161415
7aa22e01-efb1-46f3-aad6-cc1fcb2c3783
0.0.1
hfopenllm_v2/sonthenguyen_ft-unsloth-zephyr-sft-bnb-4bit-20241014-161415/1762652580.531641
1762652580.5316422
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
sonthenguyen/ft-unsloth-zephyr-sft-bnb-4bit-20241014-161415
sonthenguyen/ft-unsloth-zephyr-sft-bnb-4bit-20241014-161415
sonthenguyen
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28933784580468713}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "?", "params_billions": 7.723}
HF Open LLM v2
waqasali1707
waqasali1707/Beast-Soul-new
c04bef75-d3cc-463e-ac24-a2b18d3611af
0.0.1
hfopenllm_v2/waqasali1707_Beast-Soul-new/1762652580.592428
1762652580.592428
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
waqasali1707/Beast-Soul-new
waqasali1707/Beast-Soul-new
waqasali1707
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5029865202108184}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
zelk12
zelk12/Test01012025155054
e25f6fa3-238e-4bc3-b6ce-cdc2bc728d9c
0.0.1
hfopenllm_v2/zelk12_Test01012025155054/1762652580.6282592
1762652580.6282601
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
zelk12/Test01012025155054
zelk12/Test01012025155054
zelk12
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1555229014570229}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Gemma2ForCausalLM", "params_billions": 3.817}
HF Open LLM v2
theprint
theprint/CleverBoi-Llama-3.1-8B-Instruct
86d3bb20-09a5-4ec0-a473-14a3e3c5a402
0.0.1
hfopenllm_v2/theprint_CleverBoi-Llama-3.1-8B-Instruct/1762652580.5606558
1762652580.5606568
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
theprint/CleverBoi-Llama-3.1-8B-Instruct
theprint/CleverBoi-Llama-3.1-8B-Instruct
theprint
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16816269719898758}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "?", "params_billions": 16.061}
HF Open LLM v2
theprint
theprint/ReWiz-Nemo-12B-Instruct
92999dc0-7075-44ee-be68-1ec32ab5645d
0.0.1
hfopenllm_v2/theprint_ReWiz-Nemo-12B-Instruct/1762652580.563264
1762652580.563264
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
theprint/ReWiz-Nemo-12B-Instruct
theprint/ReWiz-Nemo-12B-Instruct
theprint
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10623811486854878}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
HF Open LLM v2
theprint
theprint/WorldBuilder-12B
f1107803-5a3b-4fcc-b948-ff622b5f26da
0.0.1
hfopenllm_v2/theprint_WorldBuilder-12B/1762652580.564255
1762652580.564256
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
theprint/WorldBuilder-12B
theprint/WorldBuilder-12B
theprint
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.13743755457741016}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "?", "params_billions": 13.933}
HF Open LLM v2
theprint
theprint/CleverBoi-7B-v3
4634b7d7-110e-422c-af60-80cd9df06dac
0.0.1
hfopenllm_v2/theprint_CleverBoi-7B-v3/1762652580.560437
1762652580.560438
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
theprint/CleverBoi-7B-v3
theprint/CleverBoi-7B-v3
theprint
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23823011830831084}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "?", "params_billions": 7.736}
HF Open LLM v2
theprint
theprint/RuDolph-Hermes-7B
22bab713-09d7-471a-b077-cb8c336ba151
0.0.1
hfopenllm_v2/theprint_RuDolph-Hermes-7B/1762652580.564037
1762652580.5640378
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
theprint/RuDolph-Hermes-7B
theprint/RuDolph-Hermes-7B
theprint
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3604292167005767}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
theprint
theprint/Boptruth-Agatha-7B
0d1c0e64-8a5a-4797-9234-91a4f1726171
0.0.1
hfopenllm_v2/theprint_Boptruth-Agatha-7B/1762652580.559956
1762652580.559957
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
theprint/Boptruth-Agatha-7B
theprint/Boptruth-Agatha-7B
theprint
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.312418826491487}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH"...
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
theprint
theprint/ReWiz-7B
b6f50cef-72b3-414c-a33a-a2c8b2af18c0
0.0.1
hfopenllm_v2/theprint_ReWiz-7B/1762652580.562494
1762652580.562496
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
theprint/ReWiz-7B
theprint/ReWiz-7B
theprint
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40479261692309737}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "?", "params_billions": 7.736}
HF Open LLM v2
theprint
theprint/CleverBoi-Nemo-12B-v2
3ac95acf-830a-48ca-a144-42b610558062
0.0.1
hfopenllm_v2/theprint_CleverBoi-Nemo-12B-v2/1762652580.561142
1762652580.561143
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
theprint/CleverBoi-Nemo-12B-v2
theprint/CleverBoi-Nemo-12B-v2
theprint
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2045827293802666}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "?", "params_billions": 13.933}
HF Open LLM v2
theprint
theprint/CleverBoi-7B-v2
0ef8de5e-4e2f-4d74-9267-e953375dbdf4
0.0.1
hfopenllm_v2/theprint_CleverBoi-7B-v2/1762652580.56022
1762652580.560221
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
theprint/CleverBoi-7B-v2
theprint/CleverBoi-7B-v2
theprint
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21699756645700075}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "?", "params_billions": 7.736}
HF Open LLM v2
theprint
theprint/ReWiz-Worldbuilder-7B
cf71c265-ef73-4410-a2bc-ce9702cfbcee
0.0.1
hfopenllm_v2/theprint_ReWiz-Worldbuilder-7B/1762652580.563769
1762652580.56377
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
theprint/ReWiz-Worldbuilder-7B
theprint/ReWiz-Worldbuilder-7B
theprint
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25101951710350756}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.248}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v2
baae7cee-8b76-456f-96dc-5ac900a9a36e
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v2/1762652579.629877
1762652579.629878
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v2
Goekdeniz-Guelmez/Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v2
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.421553699738915}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH"...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/j.o.s.i.e.v4o-1.5b-dpo-stage1-v1
b6bf7c36-006c-4256-a315-1de70e2540c3
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_j.o.s.i.e.v4o-1.5b-dpo-stage1-v1/1762652579.631213
1762652579.631215
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/j.o.s.i.e.v4o-1.5b-dpo-stage1-v1
Goekdeniz-Guelmez/j.o.s.i.e.v4o-1.5b-dpo-stage1-v1
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41883092417009093}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/Josiefied-Qwen2.5-14B-Instruct-abliterated-v4
af440c67-78de-4053-98d8-8cded9657860
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_Josiefied-Qwen2.5-14B-Instruct-abliterated-v4/1762652579.6304152
1762652579.630416
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/Josiefied-Qwen2.5-14B-Instruct-abliterated-v4
Goekdeniz-Guelmez/Josiefied-Qwen2.5-14B-Instruct-abliterated-v4
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8291666112581284}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.77}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/Josiefied-Qwen2.5-0.5B-Instruct-abliterated-v1
1b9a4b84-1766-49ca-bd11-17a2340b9736
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_Josiefied-Qwen2.5-0.5B-Instruct-abliterated-v1/1762652579.6293938
1762652579.629396
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/Josiefied-Qwen2.5-0.5B-Instruct-abliterated-v1
Goekdeniz-Guelmez/Josiefied-Qwen2.5-0.5B-Instruct-abliterated-v1
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3416944817528602}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 0.63}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/Josiefied-Qwen2.5-0.5B-Instruct-abliterated-v1
235adbd2-8128-4428-af57-8d8e310ba56f
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_Josiefied-Qwen2.5-0.5B-Instruct-abliterated-v1/1762652579.629041
1762652579.629042
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/Josiefied-Qwen2.5-0.5B-Instruct-abliterated-v1
Goekdeniz-Guelmez/Josiefied-Qwen2.5-0.5B-Instruct-abliterated-v1
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.347189900574919}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH"...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 0.63}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/josie-3b-v6.0
89947a58-5e39-468e-bbbc-2f3556a1c8f1
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_josie-3b-v6.0/1762652579.631514
1762652579.6315148
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/josie-3b-v6.0
Goekdeniz-Guelmez/josie-3b-v6.0
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6009554648333089}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 3.086}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v3
9363a90d-6ec7-4de2-af17-a3e3e25de7d9
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v3/1762652579.630181
1762652579.6301818
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v3
Goekdeniz-Guelmez/Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v3
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42525055740989465}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/josie-7b-v6.0
aa158f5d-94a5-4f40-8a65-87fe9605abc1
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_josie-7b-v6.0/1762652579.631763
1762652579.631764
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/josie-7b-v6.0
Goekdeniz-Guelmez/josie-7b-v6.0
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7411645544931892}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/Josiefied-Qwen2.5-7B-Instruct-abliterated-v2
9c443687-99df-4cd9-8e19-d40cd83b30bc
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_Josiefied-Qwen2.5-7B-Instruct-abliterated-v2/1762652579.630644
1762652579.630645
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/Josiefied-Qwen2.5-7B-Instruct-abliterated-v2
Goekdeniz-Guelmez/Josiefied-Qwen2.5-7B-Instruct-abliterated-v2
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7813811797142693}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/josie-7b-v6.0-step2000
90d4e4e1-2185-4d21-8730-f1a4bf413157
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_josie-7b-v6.0-step2000/1762652579.632
1762652579.632001
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/josie-7b-v6.0-step2000
Goekdeniz-Guelmez/josie-7b-v6.0-step2000
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7627716680629618}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/josie-7b-v6.0-step2000
7c2cc003-fab3-4fc9-a6b6-fb7075261e50
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_josie-7b-v6.0-step2000/1762652579.6322381
1762652579.632239
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/josie-7b-v6.0-step2000
Goekdeniz-Guelmez/josie-7b-v6.0-step2000
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7597740661444966}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
HF Open LLM v2
Goekdeniz-Guelmez
Goekdeniz-Guelmez/Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v1
a82acc9c-4093-4e0d-a862-7d6eb3cb7146
0.0.1
hfopenllm_v2/Goekdeniz-Guelmez_Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v1/1762652579.629639
1762652579.6296399
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Goekdeniz-Guelmez/Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v1
Goekdeniz-Guelmez/Josiefied-Qwen2.5-1.5B-Instruct-abliterated-v1
Goekdeniz-Guelmez
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47685806992114255}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.777}
HF Open LLM v2
cjvt
cjvt/GaMS-1B
e9acbb25-2b96-4a2a-92ff-d2b68c0e49f8
0.0.1
hfopenllm_v2/cjvt_GaMS-1B/1762652580.101496
1762652580.1014972
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
cjvt/GaMS-1B
cjvt/GaMS-1B
cjvt
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.163541625110263}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH"...
{"precision": "float16", "architecture": "OPTForCausalLM", "params_billions": 1.54}
HF Open LLM v2
open-neo
open-neo/Kyro-n1-7B
f69621cf-6e46-4805-b8f2-d7a7cba3a0e4
0.0.1
hfopenllm_v2/open-neo_Kyro-n1-7B/1762652580.42885
1762652580.42885
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
open-neo/Kyro-n1-7B
open-neo/Kyro-n1-7B
open-neo
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5572669406064796}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
HF Open LLM v2
open-neo
open-neo/Kyro-n1-3B
0a8b6c55-da69-4f4d-98cc-9d3f5b82d9e2
0.0.1
hfopenllm_v2/open-neo_Kyro-n1-3B/1762652580.428618
1762652580.428618
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
open-neo/Kyro-n1-3B
open-neo/Kyro-n1-3B
open-neo
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45949746672163194}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 3.086}
HF Open LLM v2
Syed-Hasan-8503
Syed-Hasan-8503/Phi-3-mini-4K-instruct-cpo-simpo
58bacacb-2936-4685-b0ba-dc8f47f3232a
0.0.1
hfopenllm_v2/Syed-Hasan-8503_Phi-3-mini-4K-instruct-cpo-simpo/1762652579.896852
1762652579.896853
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Syed-Hasan-8503/Phi-3-mini-4K-instruct-cpo-simpo
Syed-Hasan-8503/Phi-3-mini-4K-instruct-cpo-simpo
Syed-Hasan-8503
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5714049832222946}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Phi3ForCausalLM", "params_billions": 3.821}
HF Open LLM v2
Q-bert
Q-bert/MetaMath-1B
713b1c64-9637-4d83-aee9-f81988fec0b5
0.0.1
hfopenllm_v2/Q-bert_MetaMath-1B/1762652579.8185658
1762652579.8185658
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Q-bert/MetaMath-1B
Q-bert/MetaMath-1B
Q-bert
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5300391849182392}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 1.236}
HF Open LLM v2
apple
apple/DCLM-7B
3891ad0a-0acf-4d3e-a9e8-533633d9557a
0.0.1
hfopenllm_v2/apple_DCLM-7B/1762652580.0138528
1762652580.013854
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
apple/DCLM-7B
apple/DCLM-7B
apple
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21727239280664196}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "OpenLMModel", "params_billions": 7.0}
HF Open LLM v2
LLM360
LLM360/K2
4b1e267f-90c4-403a-a7cd-5c006153408b
0.0.1
hfopenllm_v2/LLM360_K2/1762652579.706215
1762652579.7062159
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
LLM360/K2
LLM360/K2
LLM360
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2252157608478836}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 65.286}
HF Open LLM v2
LLM360
LLM360/K2-Chat
f7e7c296-74f4-49fa-946d-142341749355
0.0.1
hfopenllm_v2/LLM360_K2-Chat/1762652579.706591
1762652579.706592
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
LLM360/K2-Chat
LLM360/K2-Chat
LLM360
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5151763986223221}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 65.286}
HF Open LLM v2
chujiezheng
chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO
bdf85c5c-6eaa-4df6-a393-66b71aa28952
0.0.1
hfopenllm_v2/chujiezheng_Llama-3-Instruct-8B-SimPO-ExPO/1762652580.1008909
1762652580.100893
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO
chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO
chujiezheng
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6433707008515184}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
TencentARC
TencentARC/LLaMA-Pro-8B-Instruct
98ea850e-7019-4728-a558-8b1819ec47c2
0.0.1
hfopenllm_v2/TencentARC_LLaMA-Pro-8B-Instruct/1762652579.9131231
1762652579.913124
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
TencentARC/LLaMA-Pro-8B-Instruct
TencentARC/LLaMA-Pro-8B-Instruct
TencentARC
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4486063644463357}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.357}
HF Open LLM v2
bigcode
bigcode/starcoder2-3b
7385c595-5b4f-4491-8e71-ece57ffffbd2
0.0.1
hfopenllm_v2/bigcode_starcoder2-3b/1762652580.0331972
1762652580.0331972
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
bigcode/starcoder2-3b
bigcode/starcoder2-3b
bigcode
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.20370838264693236}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "Starcoder2ForCausalLM", "params_billions": 3.03}
HF Open LLM v2
bigcode
bigcode/starcoder2-15b
09aa04cf-9369-453f-952a-2f6c74e4707a
0.0.1
hfopenllm_v2/bigcode_starcoder2-15b/1762652580.032956
1762652580.0329568
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
bigcode/starcoder2-15b
bigcode/starcoder2-15b
bigcode
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2780223141265177}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Starcoder2ForCausalLM", "params_billions": 15.958}
HF Open LLM v2
bigcode
bigcode/starcoder2-7b
53eac61a-064e-4786-bc94-962382d88f77
0.0.1
hfopenllm_v2/bigcode_starcoder2-7b/1762652580.0333922
1762652580.0333922
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
bigcode/starcoder2-7b
bigcode/starcoder2-7b
bigcode
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22091938279321088}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "Starcoder2ForCausalLM", "params_billions": 7.174}
HF Open LLM v2
ewre324
ewre324/Thinker-SmolLM2-135M-Instruct-Reasoning
5a03703c-6934-437c-aaca-2acfdd4ca629
0.0.1
hfopenllm_v2/ewre324_Thinker-SmolLM2-135M-Instruct-Reasoning/1762652580.148509
1762652580.14851
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ewre324/Thinker-SmolLM2-135M-Instruct-Reasoning
ewre324/Thinker-SmolLM2-135M-Instruct-Reasoning
ewre324
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25836336476105626}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 0.135}
HF Open LLM v2
ewre324
ewre324/Thinker-Qwen2.5-0.5B-Instruct-Reasoning
fe29c3e7-463b-45a1-8377-97e7c7f21874
0.0.1
hfopenllm_v2/ewre324_Thinker-Qwen2.5-0.5B-Instruct-Reasoning/1762652580.148299
1762652580.1483
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ewre324/Thinker-Qwen2.5-0.5B-Instruct-Reasoning
ewre324/Thinker-Qwen2.5-0.5B-Instruct-Reasoning
ewre324
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2476473534665798}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 0.494}
HF Open LLM v2
ewre324
ewre324/Thinker-Llama-3.2-3B-Instruct-Reasoning
8bdc63c5-2ed3-4738-8a5c-6b90ba969f99
0.0.1
hfopenllm_v2/ewre324_Thinker-Llama-3.2-3B-Instruct-Reasoning/1762652580.148031
1762652580.148032
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ewre324/Thinker-Llama-3.2-3B-Instruct-Reasoning
ewre324/Thinker-Llama-3.2-3B-Instruct-Reasoning
ewre324
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44388555698878973}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.213}
HF Open LLM v2
ewre324
ewre324/ewre324-R1-SmolLM2-135M-Distill
6429c440-4d89-4d31-919c-63cde25ba99f
0.0.1
hfopenllm_v2/ewre324_ewre324-R1-SmolLM2-135M-Distill/1762652580.148724
1762652580.148725
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ewre324/ewre324-R1-SmolLM2-135M-Distill
ewre324/ewre324-R1-SmolLM2-135M-Distill
ewre324
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16489026893088118}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 0.135}
HF Open LLM v2
lars1234
lars1234/Mistral-Small-24B-Instruct-2501-writer
89742249-c51e-48e9-8bf1-7aad55e222c1
0.0.1
hfopenllm_v2/lars1234_Mistral-Small-24B-Instruct-2501-writer/1762652580.314311
1762652580.314312
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
lars1234/Mistral-Small-24B-Instruct-2501-writer
lars1234/Mistral-Small-24B-Instruct-2501-writer
lars1234
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6565346613651777}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 23.572}
HF Open LLM v2
xMaulana
xMaulana/FinMatcha-3B-Instruct
105021c8-c214-4a6a-ac3b-747c4c48886e
0.0.1
hfopenllm_v2/xMaulana_FinMatcha-3B-Instruct/1762652580.5969138
1762652580.5969138
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
xMaulana/FinMatcha-3B-Instruct
xMaulana/FinMatcha-3B-Instruct
xMaulana
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7548283000217202}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 3.213}
HF Open LLM v2
BoltMonkey
BoltMonkey/NeuralDaredevil-SuperNova-Lite-7B-DARETIES-abliterated
f83a5d67-b967-47c8-b76e-b58c445a3634
0.0.1
hfopenllm_v2/BoltMonkey_NeuralDaredevil-SuperNova-Lite-7B-DARETIES-abliterated/1762652579.498964
1762652579.498965
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
BoltMonkey/NeuralDaredevil-SuperNova-Lite-7B-DARETIES-abliterated
BoltMonkey/NeuralDaredevil-SuperNova-Lite-7B-DARETIES-abliterated
BoltMonkey
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45902316963434797}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
BoltMonkey
BoltMonkey/NeuralDaredevil-SuperNova-Lite-7B-DARETIES-abliterated
d9e3bd73-cd7e-46d4-9e62-0cfac178f62a
0.0.1
hfopenllm_v2/BoltMonkey_NeuralDaredevil-SuperNova-Lite-7B-DARETIES-abliterated/1762652579.498452
1762652579.498454
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
BoltMonkey/NeuralDaredevil-SuperNova-Lite-7B-DARETIES-abliterated
BoltMonkey/NeuralDaredevil-SuperNova-Lite-7B-DARETIES-abliterated
BoltMonkey
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7998909559967553}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
BoltMonkey
BoltMonkey/SuperNeuralDreadDevil-8b
2ad0eebb-31e3-4f28-aba6-073f33d5cbed
0.0.1
hfopenllm_v2/BoltMonkey_SuperNeuralDreadDevil-8b/1762652579.499188
1762652579.499189
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
BoltMonkey/SuperNeuralDreadDevil-8b
BoltMonkey/SuperNeuralDreadDevil-8b
BoltMonkey
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7709898624538447}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
BoltMonkey
BoltMonkey/DreadMix
e6b5e728-28a4-444a-8b6b-89d29b7b5225
0.0.1
hfopenllm_v2/BoltMonkey_DreadMix/1762652579.497959
1762652579.497961
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
BoltMonkey/DreadMix
BoltMonkey/DreadMix
BoltMonkey
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7094908176970438}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
Telugu-LLM-Labs
Telugu-LLM-Labs/Indic-gemma-2b-finetuned-sft-Navarasa-2.0
ec8a8e25-f985-40a8-80ff-0c7d7595029d
0.0.1
hfopenllm_v2/Telugu-LLM-Labs_Indic-gemma-2b-finetuned-sft-Navarasa-2.0/1762652579.912417
1762652579.912417
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Telugu-LLM-Labs/Indic-gemma-2b-finetuned-sft-Navarasa-2.0
Telugu-LLM-Labs/Indic-gemma-2b-finetuned-sft-Navarasa-2.0
Telugu-LLM-Labs
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21030310686755588}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "GemmaForCausalLM", "params_billions": 2.506}
HF Open LLM v2
Telugu-LLM-Labs
Telugu-LLM-Labs/Indic-gemma-7b-finetuned-sft-Navarasa-2.0
89d117f3-7a67-4e30-82b2-b42efaf44024
0.0.1
hfopenllm_v2/Telugu-LLM-Labs_Indic-gemma-7b-finetuned-sft-Navarasa-2.0/1762652579.912673
1762652579.912673
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
Telugu-LLM-Labs/Indic-gemma-7b-finetuned-sft-Navarasa-2.0
Telugu-LLM-Labs/Indic-gemma-7b-finetuned-sft-Navarasa-2.0
Telugu-LLM-Labs
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32368449048524583}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "GemmaForCausalLM", "params_billions": 8.538}
HF Open LLM v2
AuraIndustries
AuraIndustries/Aura-4B
5fe88e89-1055-4357-9394-004dd4635e58
0.0.1
hfopenllm_v2/AuraIndustries_Aura-4B/1762652579.484812
1762652579.484813
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
AuraIndustries/Aura-4B
AuraIndustries/Aura-4B
AuraIndustries
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38156203318306536}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 4.513}
HF Open LLM v2
AuraIndustries
AuraIndustries/Aura-MoE-2x4B
8239ffac-3fca-4eab-86d4-78bab22dc420
0.0.1
hfopenllm_v2/AuraIndustries_Aura-MoE-2x4B/1762652579.48526
1762652579.485261
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
AuraIndustries/Aura-MoE-2x4B
AuraIndustries/Aura-MoE-2x4B
AuraIndustries
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.460096987105325}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH"...
{"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 7.231}
HF Open LLM v2
AuraIndustries
AuraIndustries/Aura-8B
39e029ad-b385-4b26-9a02-b40c90cd8ad8
0.0.1
hfopenllm_v2/AuraIndustries_Aura-8B/1762652579.485057
1762652579.485057
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
AuraIndustries/Aura-8B
AuraIndustries/Aura-8B
AuraIndustries
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7205315230255722}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
AuraIndustries
AuraIndustries/Aura-MoE-2x4B-v2
3402882b-af4e-4509-9d57-32efa5d8c495
0.0.1
hfopenllm_v2/AuraIndustries_Aura-MoE-2x4B-v2/1762652579.4855082
1762652579.4855092
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
AuraIndustries/Aura-MoE-2x4B-v2
AuraIndustries/Aura-MoE-2x4B-v2
AuraIndustries
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4777822843388875}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 7.231}
HF Open LLM v2
AI4free
AI4free/Dhanishtha
a554a3eb-943c-4135-966b-929129ef025d
0.0.1
hfopenllm_v2/AI4free_Dhanishtha/1762652579.475332
1762652579.475332
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
AI4free/Dhanishtha
AI4free/Dhanishtha
AI4free
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2451240486353985}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.777}
HF Open LLM v2
AI4free
AI4free/t2
332ccdb5-faf5-47c6-afeb-a91d2148adf0
0.0.1
hfopenllm_v2/AI4free_t2/1762652579.475577
1762652579.475578
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
AI4free/t2
AI4free/t2
AI4free
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3866828902866616}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613}
HF Open LLM v2
lt-asset
lt-asset/nova-1.3b
4c3005e9-fffd-491b-8ce1-58204986b787
0.0.1
hfopenllm_v2/lt-asset_nova-1.3b/1762652580.3279538
1762652580.327955
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
lt-asset/nova-1.3b
lt-asset/nova-1.3b
lt-asset
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1214255951985177}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "NovaForCausalLM", "params_billions": 1.347}
HF Open LLM v2
ManoloPueblo
ManoloPueblo/LLM_MERGE_CC3
1c3dfe6a-28e7-4125-a802-1898336b1beb
0.0.1
hfopenllm_v2/ManoloPueblo_LLM_MERGE_CC3/1762652579.7460978
1762652579.746099
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ManoloPueblo/LLM_MERGE_CC3
ManoloPueblo/LLM_MERGE_CC3
ManoloPueblo
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3958751667797001}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
ManoloPueblo
ManoloPueblo/LLM_MERGE_CC2
f7ca7fb6-b02c-4c27-afef-662bb62cd054
0.0.1
hfopenllm_v2/ManoloPueblo_LLM_MERGE_CC2/1762652579.745891
1762652579.745892
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ManoloPueblo/LLM_MERGE_CC2
ManoloPueblo/LLM_MERGE_CC2
ManoloPueblo
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3853087585384557}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
ManoloPueblo
ManoloPueblo/ContentCuisine_1-7B-slerp
74d2724e-9d5d-4142-9cff-3fd40c931882
0.0.1
hfopenllm_v2/ManoloPueblo_ContentCuisine_1-7B-slerp/1762652579.745631
1762652579.745632
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
ManoloPueblo/ContentCuisine_1-7B-slerp
ManoloPueblo/ContentCuisine_1-7B-slerp
ManoloPueblo
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3907044419916932}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
sarvamai
sarvamai/OpenHathi-7B-Hi-v0.1-Base
e0c03300-a08f-409e-9f39-f00d5e9e126f
0.0.1
hfopenllm_v2/sarvamai_OpenHathi-7B-Hi-v0.1-Base/1762652580.509491
1762652580.5094929
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
sarvamai/OpenHathi-7B-Hi-v0.1-Base
sarvamai/OpenHathi-7B-Hi-v0.1-Base
sarvamai
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18040244329490196}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 6.87}
HF Open LLM v2
dnhkng
dnhkng/RYS-Llama-3-Huge-Instruct
0e8dfce1-b0d3-4ba5-a3be-ba6f52421841
0.0.1
hfopenllm_v2/dnhkng_RYS-Llama-3-Huge-Instruct/1762652580.1319628
1762652580.131964
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
dnhkng/RYS-Llama-3-Huge-Instruct
dnhkng/RYS-Llama-3-Huge-Instruct
dnhkng
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7685917809190725}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 99.646}
HF Open LLM v2
dnhkng
dnhkng/RYS-Llama-3-8B-Instruct
85472ae2-d5f0-4896-811b-d4217241bcef
0.0.1
hfopenllm_v2/dnhkng_RYS-Llama-3-8B-Instruct/1762652580.131744
1762652580.131744
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
dnhkng/RYS-Llama-3-8B-Instruct
dnhkng/RYS-Llama-3-8B-Instruct
dnhkng
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6957772044841022}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.248}
HF Open LLM v2
dnhkng
dnhkng/RYS-Llama-3-Large-Instruct
f9485436-6935-422f-9eb1-ee7faeb231d1
0.0.1
hfopenllm_v2/dnhkng_RYS-Llama-3-Large-Instruct/1762652580.132239
1762652580.132241
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
dnhkng/RYS-Llama-3-Large-Instruct
dnhkng/RYS-Llama-3-Large-Instruct
dnhkng
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8050616807847621}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 73.976}
HF Open LLM v2
dnhkng
dnhkng/RYS-XLarge2
6f344c50-fdf3-477e-9a76-558ed61fd509
0.0.1
hfopenllm_v2/dnhkng_RYS-XLarge2/1762652580.1343
1762652580.134301
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
dnhkng/RYS-XLarge2
dnhkng/RYS-XLarge2
dnhkng
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49019712141562166}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 77.965}
HF Open LLM v2
dnhkng
dnhkng/RYS-Phi-3-medium-4k-instruct
94f92919-36fb-4aed-8c0c-2bee0cd1d301
0.0.1
hfopenllm_v2/dnhkng_RYS-Phi-3-medium-4k-instruct/1762652580.133586
1762652580.133587
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
dnhkng/RYS-Phi-3-medium-4k-instruct
dnhkng/RYS-Phi-3-medium-4k-instruct
dnhkng
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4391392616036561}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Phi3ForCausalLM", "params_billions": 17.709}
HF Open LLM v2
dnhkng
dnhkng/RYS-Medium
ca1e127b-ded1-4015-85b9-be134c26644d
0.0.1
hfopenllm_v2/dnhkng_RYS-Medium/1762652580.131469
1762652580.13147
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
dnhkng/RYS-Medium
dnhkng/RYS-Medium
dnhkng
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4406131287206833}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Phi3ForCausalLM", "params_billions": 18.731}
HF Open LLM v2
dnhkng
dnhkng/RYS-XLarge
a2a90b7e-f6db-408a-b5df-284d0b4a6353
0.0.1
hfopenllm_v2/dnhkng_RYS-XLarge/1762652580.1338398
1762652580.1338408
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
dnhkng/RYS-XLarge
dnhkng/RYS-XLarge
dnhkng
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7995662619627034}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 77.965}
HF Open LLM v2
dnhkng
dnhkng/RYS-XLarge-base
1b0bb4ca-9553-4ddd-bf35-cab66685668d
0.0.1
hfopenllm_v2/dnhkng_RYS-XLarge-base/1762652580.134071
1762652580.134072
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
dnhkng/RYS-XLarge-base
dnhkng/RYS-XLarge-base
dnhkng
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7910233735377686}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 77.972}
HF Open LLM v2
dnhkng
dnhkng/RYS-Llama-3.1-8B-Instruct
62dab9bd-df83-4a0b-be94-0ddd981da6e4
0.0.1
hfopenllm_v2/dnhkng_RYS-Llama-3.1-8B-Instruct/1762652580.132753
1762652580.1327538
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
dnhkng/RYS-Llama-3.1-8B-Instruct
dnhkng/RYS-Llama-3.1-8B-Instruct
dnhkng
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7684920455502511}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "?", "params_billions": 8.685}
HF Open LLM v2
swap-uniba
swap-uniba/LLaMAntino-3-ANITA-8B-Inst-DPO-ITA
f2475574-fc9d-4cd1-94fb-ddd8bb89fa95
0.0.1
hfopenllm_v2/swap-uniba_LLaMAntino-3-ANITA-8B-Inst-DPO-ITA/1762652580.550269
1762652580.5502698
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
swap-uniba/LLaMAntino-3-ANITA-8B-Inst-DPO-ITA
swap-uniba/LLaMAntino-3-ANITA-8B-Inst-DPO-ITA
swap-uniba
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4815046299374548}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
HF Open LLM v2
xukp20
xukp20/Llama-3-8B-Instruct-SPPO-Iter3_bt_8b-table
406f36fc-1243-4342-80c6-95b96fcc003f
0.0.1
hfopenllm_v2/xukp20_Llama-3-8B-Instruct-SPPO-Iter3_bt_8b-table/1762652580.600485
1762652580.6004858
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
xukp20/Llama-3-8B-Instruct-SPPO-Iter3_bt_8b-table
xukp20/Llama-3-8B-Instruct-SPPO-Iter3_bt_8b-table
xukp20
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7034457461757027}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}