Column summary for the dataset, as given in this extract (stringclasses rows report distinct-value counts; stringlengths rows report min/max string length):

| Column | Dtype summary | Lengths / distinct values |
|---|---|---|
| _leaderboard | stringclasses | 1 value |
| _developer | stringclasses | 559 values |
| _model | stringlengths | 9 to 102 chars |
| _uuid | stringlengths | 36 chars (fixed) |
| schema_version | stringclasses | 1 value |
| evaluation_id | stringlengths | 35 to 133 chars |
| retrieved_timestamp | stringlengths | 13 to 18 chars |
| source_data | stringclasses | 1 value |
| evaluation_source_name | stringclasses | 1 value |
| evaluation_source_type | stringclasses | 1 value |
| source_organization_name | stringclasses | 1 value |
| source_organization_url | null | always null |
| source_organization_logo_url | null | always null |
| evaluator_relationship | stringclasses | 1 value |
| model_name | stringlengths | 4 to 102 chars |
| model_id | stringlengths | 9 to 102 chars |
| model_developer | stringclasses | 559 values |
| model_inference_platform | stringclasses | 1 value |
| evaluation_results | stringlengths | 1.35k to 1.41k chars |
| additional_details | stringclasses | 660 values |
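A minimal sketch of one row as a Python type, inferred from the column summary above; the class name `LeaderboardRow` and the `parse_results` helper are illustrative assumptions, and every field arrives as a plain string (or null) with JSON nested inside some of them:

```python
import json
from typing import Optional, TypedDict

class LeaderboardRow(TypedDict):
    # Field names and nullability follow the column summary above.
    _leaderboard: str               # "HF Open LLM v2" in every row shown
    _developer: str                 # 559 distinct developers
    _model: str
    _uuid: str                      # fixed-width 36-char UUID
    schema_version: str             # "0.0.1" in every row shown
    evaluation_id: str
    retrieved_timestamp: str        # unix epoch seconds, as a string
    source_data: str                # JSON-encoded list containing one URL
    evaluation_source_name: str
    evaluation_source_type: str     # "leaderboard"
    source_organization_name: str   # "Hugging Face"
    source_organization_url: Optional[str]       # null in every row shown
    source_organization_logo_url: Optional[str]  # null in every row shown
    evaluator_relationship: str     # "third_party"
    model_name: str
    model_id: str
    model_developer: str
    model_inference_platform: str   # "unknown"
    evaluation_results: str         # JSON-encoded list of benchmark entries
    additional_details: str         # JSON-encoded dict: precision, architecture, params

def parse_results(row: LeaderboardRow) -> list[dict]:
    """Decode the nested JSON carried in the evaluation_results string."""
    return json.loads(row["evaluation_results"])
```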
Seventy sample rows follow, all drawn from the HF Open LLM v2 leaderboard snapshot. Ten fields are identical in every row: `_leaderboard` and `evaluation_source_name` are "HF Open LLM v2", `schema_version` is "0.0.1", `source_data` is `["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]`, `evaluation_source_type` is "leaderboard", `source_organization_name` is "Hugging Face", `source_organization_url` and `source_organization_logo_url` are null, `evaluator_relationship` is "third_party", and `model_inference_platform` is "unknown". Within each row, `_model`, `model_name`, and `model_id` carry the same `developer/name` string, `_developer` and `model_developer` repeat its namespace prefix, and `evaluation_id` has the form `hfopenllm_v2/<model_id with "/" replaced by "_">/<timestamp>`, where that timestamp occasionally differs from `retrieved_timestamp` in its final digits, so both are tabulated below. Each `evaluation_results` string opens with a complete IFEval entry (continuous accuracy, min 0, max 1, higher is better) and is truncated in this extract partway into the BBH entry, so only the IFEval score is recoverable; `additional_details` carries the precision, architecture, and parameter count. A re-fetch sketch follows, and the table after it lists the per-row fields.
| Model (`_model` = `model_name` = `model_id`) | `_uuid` | `evaluation_id` timestamp | `retrieved_timestamp` | IFEval | Precision | Architecture | Params (B) |
|---|---|---|---|---|---|---|---|
| Qwen/Qwen2-0.5B-Instruct | 6986e9f0-d008-4418-b3cb-1e870cf57e02 | 1762652579.839177 | 1762652579.839178 | 0.22466610814860127 | bfloat16 | Qwen2ForCausalLM | 0.494 |
| Qwen/Qwen2-7B-Instruct | 3e1ebb01-6fbb-498c-af58-022f50247ec9 | 1762652579.84092 | 1762652579.84092 | 0.5679075962889577 | bfloat16 | Qwen2ForCausalLM | 7.616 |
| Qwen/Qwen2.5-7B-Instruct-1M | f338f8b3-d2fa-46e6-b2a1-b83303521b3f | 1762652579.845428 | 1762652579.845428 | 0.7447616767953474 | bfloat16 | Qwen2ForCausalLM | 7.616 |
| Qwen/Qwen2.5-Math-7B-Instruct | 6ba8109e-8906-420f-a780-d0bef4015e1a | 1762652579.848376 | 1762652579.848377 | 0.26358395723347383 | bfloat16 | Qwen2ForCausalLM | 7.0 |
| Qwen/Qwen2.5-Coder-14B-Instruct | f2295cf4-86e0-4c73-8f3d-21c6e5ccd9d9 | 1762652579.846175 | 1762652579.846175 | 0.6907560827493273 | bfloat16 | Qwen2ForCausalLM | 14.77 |
| Qwen/Qwen2.5-Coder-32B-Instruct | c0ca7adb-6221-415f-8ed6-0de6439db168 | 1762652579.846655 | 1762652579.846655 | 0.7265267268625026 | float16 | Qwen2ForCausalLM | 32.764 |
| Qwen/Qwen2.5-14B-Instruct-1M | 52ff136b-084f-4ca3-a48e-83fb0bbd8ebc | 1762652579.843473 | 1762652579.843473 | 0.8413564896696322 | bfloat16 | Qwen2ForCausalLM | 14.77 |
| Qwen/Qwen2.5-3B-Instruct | 9fb4e863-fd72-4b60-bc20-e32e64ce99e8 | 1762652579.844352 | 1762652579.844352 | 0.6474919879253713 | bfloat16 | Qwen2ForCausalLM | 3.0 |
| Qwen/Qwen2.5-Math-1.5B-Instruct | 393c9602-bd87-48d7-ad95-6baf85ed3341 | 1762652579.84755 | 1762652579.84755 | 0.1855731680829089 | bfloat16 | Qwen2ForCausalLM | 1.544 |
| Jimmy19991222/Llama-3-Instruct-8B-SimPO-v0.2 | 4d7428e8-41a2-4834-900e-e43b05f4d131 | 1762652579.692669 | 1762652579.692669 | 0.6540368444615842 | float16 | LlamaForCausalLM | 8.03 |
| Jimmy19991222/llama-3-8b-instruct-gapo-v2-bert_f1-beta10-gamma0.3-lr1.0e-6-scale-log | 913d1072-8ea3-4e0d-9d72-d30ae186dc7d | 1762652579.6931531 | 1762652579.693154 | 0.6555605792630221 | bfloat16 | LlamaForCausalLM | 8.03 |
| Jimmy19991222/llama-3-8b-instruct-gapo-v2-bert_p-beta10-gamma0.3-lr1.0e-6-scale-log | 55baee54-fb05-49a1-962d-145a93de91a8 | 1762652579.693368 | 1762652579.6933692 | 0.6315055164740666 | bfloat16 | LlamaForCausalLM | 8.03 |
| Jimmy19991222/llama-3-8b-instruct-gapo-v2-bert-f1-beta10-gamma0.3-lr1.0e-6-1minus-rerun | 9e8f395c-f481-4a64-86ee-053961b17c42 | 1762652579.6929338 | 1762652579.692935 | 0.6717221416951467 | bfloat16 | LlamaForCausalLM | 8.03 |
| Jimmy19991222/llama-3-8b-instruct-gapo-v2-rougeL-beta10-gamma0.3-lr1.0e-6-scale-log | 6621f47a-13c7-421c-b054-cc9116a04e4e | 1762652579.694266 | 1762652579.6942668 | 0.649190813707629 | bfloat16 | LlamaForCausalLM | 8.03 |
| Jimmy19991222/llama-3-8b-instruct-gapo-v2-bleu-beta0.1-no-length-scale-gamma0.4 | 601e250a-5c2f-4947-9ea3-0f903b2823ec | 1762652579.69359 | 1762652579.693591 | 0.6284580468711907 | bfloat16 | LlamaForCausalLM | 8.03 |
| Jimmy19991222/llama-3-8b-instruct-gapo-v2-rouge2-beta10-gamma0.3-lr1.0e-6-scale-log | 5f6d2c1e-1c66-4b1c-beed-a730d93d997f | 1762652579.69404 | 1762652579.694041 | 0.6605063453857986 | bfloat16 | LlamaForCausalLM | 8.03 |
| Jimmy19991222/llama-3-8b-instruct-gapo-v2-rouge2-beta10-1minus-gamma0.3-rerun | 8ab1619c-6edf-457e-9834-0e9dc127d6a4 | 1762652579.69381 | 1762652579.693811 | 0.6677504576745258 | bfloat16 | LlamaForCausalLM | 8.03 |
| BenevolenceMessiah/Yi-Coder-9B-Chat-Instruct-TIES-MoE-v1.0 | 129ba653-ec88-46f2-8828-77e320b922c6 | 1762652579.4948769 | 1762652579.494878 | 0.3011531624977283 | bfloat16 | MixtralForCausalLM | 28.309 |
| BenevolenceMessiah/Qwen2.5-72B-2x-Instruct-TIES-v1.0 | ad8e3029-612c-434e-a92b-f5c481476e25 | 1762652579.4945831 | 1762652579.494584 | 0.5473499204333391 | bfloat16 | Qwen2ForCausalLM | 72.7 |
| bamec66557/VICIOUS_MESH-12B-EPSILON | 38864e75-9bb0-4eaa-ba87-c631838a9ad1 | 1762652580.0279832 | 1762652580.0279832 | 0.6304560787599126 | bfloat16 | MistralForCausalLM | 6.124 |
| bamec66557/MISCHIEVOUS-12B-Mix_0.3v | 8e2e1f2f-4715-4b8b-b641-d5e552500408 | 1762652580.02432 | 1762652580.024322 | 0.38698209639312575 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/VICIOUS_MESH-12B-BETA | 2f023511-2446-48f8-83e5-47225f15e905 | 1762652580.0273511 | 1762652580.0273511 | 0.6720967034136092 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/VICIOUS_MESH-12B-DELTA | fcaf0de1-f4f5-4bfb-8276-29b3b1f5b5be | 1762652580.027563 | 1762652580.027563 | 0.6468924675416783 | bfloat16 | MistralForCausalLM | 6.124 |
| bamec66557/VICIOUS_MESH-12B-NEMO | 6a9c649c-fbcd-489a-bc01-083014932a45 | 1762652580.028384 | 1762652580.028385 | 0.40221944440750546 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/MISCHIEVOUS-12B-Mix_0.2v | d509b0d3-a043-4057-bf80-37ec5ceedeed | 1762652580.023869 | 1762652580.02387 | 0.3623773809048879 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/NameLess-12B-prob | 81670e41-16d6-43a6-9af9-6924a52a8300 | 1762652580.026292 | 1762652580.026293 | 0.6602315190361574 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/MISCHIEVOUS-12B-Mix_III_IV_V | c2e334b3-e82d-40bb-a6ed-9a941bf2352a | 1762652580.0253649 | 1762652580.025366 | 0.40309379114083965 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/mergekit-ties-sinbkow | b8c00b3b-c35a-4511-965b-6096e9b116de | 1762652580.029482 | 1762652580.029482 | 0.6431956098706986 | bfloat16 | MistralForCausalLM | 6.124 |
| bamec66557/MISCHIEVOUS-12B-Mix_0.6v | a58c4863-e5a9-425d-ad3e-5924d6146718 | 1762652580.025138 | 1762652580.0251389 | 0.43656608908806416 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/MISCHIEVOUS-12B-Mix_0.4v | 4072cc72-b6b4-4a5d-8f01-f9f8437ea569 | 1762652580.024673 | 1762652580.024674 | 0.6508142838778884 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/VICIOUS_MESH-12B-DIGAMMA | 67e74757-9950-499e-9258-7ccd20b29835 | 1762652580.027769 | 1762652580.02777 | 0.6429207835210575 | bfloat16 | MistralForCausalLM | 6.124 |
| bamec66557/VICIOUS_MESH-12B-GAMMA | 4507a6c1-bfff-4e8d-92c6-7e923f74c4dc | 1762652580.028181 | 1762652580.028182 | 0.6361764562472019 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/VICIOUS_MESH-12B-0.1v | 2d468a71-7364-40eb-8a98-1dbac956b3cf | 1762652580.026718 | 1762652580.026719 | 0.36574954454181574 | bfloat16 | MistralForCausalLM | 6.124 |
| bamec66557/VICIOUS_MESH-12B-UNION | 20d0e946-e7cf-48a6-a81e-f73d774e0e2b | 1762652580.028806 | 1762652580.028807 | 0.6428709158366468 | bfloat16 | MistralForCausalLM | 6.124 |
| bamec66557/VICIOUS_MESH-12B-0.X.ver | d0c92f20-72d0-431c-b8ba-881b3a6ae158 | 1762652580.0269299 | 1762652580.0269299 | 0.37756486123485683 | bfloat16 | MistralForCausalLM | 6.124 |
| bamec66557/MISCHIEVOUS-12B-Mix_III_ex_V | 6f31292a-b09f-4e2c-ae3c-b093c5ba06c6 | 1762652580.025593 | 1762652580.025593 | 0.43162032296528763 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/MISCHIEVOUS-12B-Mix_0.5v | fa2e9cff-4a7b-4efd-98ca-b8fd2cb33928 | 1762652580.0249128 | 1762652580.024914 | 0.3745672593163916 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/mergekit-model_stock-zdaysvi | 8932da66-d29a-4453-9b61-bee48f1a28f1 | 1762652580.029272 | 1762652580.029272 | 0.6425960894870055 | bfloat16 | MistralForCausalLM | 6.124 |
| bamec66557/VICIOUS_MESH-12B_Razor | 950f6bff-e0ec-4556-85b7-81444008d1d4 | 1762652580.029016 | 1762652580.029016 | 0.37364304489864675 | bfloat16 | MistralForCausalLM | 6.124 |
| bamec66557/MISCHIEVOUS-12B-Mix_Neo | 089a5215-70a4-4255-ac01-1b70d4e8a494 | 1762652580.0258071 | 1762652580.0258079 | 0.6249606599378538 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/MISCHIEVOUS-12B | 49ec948c-c06d-4c01-be83-9f74ed15ea17 | 1762652580.02337 | 1762652580.02337 | 0.3851835352420466 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/VICIOUS_MESH-12B-OMEGA | a630e843-ec9c-432b-986a-2b181c789507 | 1762652580.028594 | 1762652580.028594 | 0.6699734482284783 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/VICIOUS_MESH-12B-ALPHA | 0053cf6a-0e1e-49c5-8d0a-b3d7254e22f3 | 1762652580.0271401 | 1762652580.027141 | 0.6365011502812536 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/MISCHIEVOUS-12B-Mix_0.1v | ae256440-486f-43cf-b4a3-8d5c0ff196c9 | 1762652580.023659 | 1762652580.023659 | 0.36362628935668473 | bfloat16 | MistralForCausalLM | 12.248 |
| bamec66557/VICIOUS_MESH-12B | f2ef86c9-e968-42e0-a0d0-1cf79f9c249b | 1762652580.026504 | 1762652580.026504 | 0.37156965739792636 | bfloat16 | MistralForCausalLM | 6.124 |
| allknowingroger/MultiMash6-12B-slerp | 195b1c31-c766-479c-a445-39a6150404fc | 1762652579.992992 | 1762652579.992993 | 0.43004672047943904 | bfloat16 | MixtralForCausalLM | 12.879 |
| allknowingroger/Quen2-65B | 4bc3f55b-0638-4fc2-b1d9-04780707acef | 1762652579.9981499 | 1762652579.9981499 | 0.17578137120617737 | float16 | Qwen2ForCausalLM | 63.923 |
| allknowingroger/Yislerp-34B | 723d2f60-f12a-4abb-9061-807fd38e7d51 | 1762652580.0049741 | 1762652580.004975 | 0.3691970637907419 | bfloat16 | LlamaForCausalLM | 34.389 |
| allknowingroger/Ph3merge2-14B | b5790fec-6c12-42a3-853c-488658bf949d | 1762652579.996639 | 1762652579.99664 | 0.17061064641817045 | bfloat16 | Phi3ForCausalLM | 13.619 |
| allknowingroger/MultiMash11-13B-slerp | 1b3bfb2a-8290-4af0-bdac-24397a5b6f86 | 1762652579.992343 | 1762652579.9923441 | 0.4251009543566625 | bfloat16 | MixtralForCausalLM | 12.879 |
| allknowingroger/NeuralWestSeverus-7B-slerp | fc6d4451-0a9c-4d53-8d22-179ff7059d61 | 1762652579.995253 | 1762652579.995254 | 0.41356046401326263 | bfloat16 | MistralForCausalLM | 7.242 |
| allknowingroger/Multimerge-19B-pass | 818e21b8-da78-4649-a71a-ba71c89d1fe7 | 1762652579.9948218 | 1762652579.994823 | 0.17730510600761534 | bfloat16 | MixtralForCausalLM | 19.188 |
| allknowingroger/MultiMash10-13B-slerp | 7e4b1f44-73f9-4a6d-9d66-91c60e69e3d2 | 1762652579.992115 | 1762652579.992116 | 0.41628323958208663 | bfloat16 | MixtralForCausalLM | 12.879 |
| allknowingroger/Ph3merge3-14B | e5d9bded-a8e4-4133-84b9-6eac517a4226 | 1762652579.99685 | 1762652579.996851 | 0.1645157072124186 | bfloat16 | Phi3ForCausalLM | 13.619 |
| allknowingroger/LimyQstar-7B-slerp | ac45b8ec-454f-4a91-9418-a3dc70535119 | 1762652579.98914 | 1762652579.989141 | 0.34911368502240725 | bfloat16 | MistralForCausalLM | 7.242 |
| allknowingroger/HomerSlerp2-7B | ea9cc238-75d0-45e7-b10e-e214516ca36e | 1762652579.988459 | 1762652579.98846 | 0.44868172005833407 | bfloat16 | Qwen2ForCausalLM | 7.616 |
| allknowingroger/YamMaths-7B-slerp | 52ab1e94-4e6f-4876-932b-a45a033dec1b | 1762652580.003488 | 1762652580.003489 | 0.4148093724650594 | bfloat16 | MistralForCausalLM | 7.242 |
| allknowingroger/MultiCalm-7B-slerp | 36176ae9-e852-4604-9961-b7f02e4c3e55 | 1762652579.991671 | 1762652579.991672 | 0.3926526061960044 | bfloat16 | MistralForCausalLM | 7.242 |
| allknowingroger/MixTAO-19B-pass | 275fb96e-4779-479b-937b-f5db6aa530ea | 1762652579.991234 | 1762652579.991235 | 0.3814368098866563 | bfloat16 | MixtralForCausalLM | 19.188 |
| allknowingroger/HomerSlerp1-7B | 340dfc7b-9af0-4545-9d7b-6950ea69bd57 | 1762652579.988248 | 1762652579.988249 | 0.46212050692163464 | bfloat16 | Qwen2ForCausalLM | 7.616 |
| allknowingroger/Neuralmultiverse-7B-slerp | b98b76ea-b068-46ec-b929-4ca1037eaf99 | 1762652579.995954 | 1762652579.995955 | 0.3769154731667531 | bfloat16 | MistralForCausalLM | 7.242 |
| allknowingroger/Weirdslerp2-25B | 61e517f7-e2db-48bd-8f4e-f62b5859b62e | 1762652580.00309 | 1762652580.0030909 | 0.1754068094877148 | bfloat16 | LlamaForCausalLM | 25.204 |
| allknowingroger/RogerMerge-7B-slerp | 50289a8b-4522-4dca-b6dc-aa42193deefa | 1762652580.002474 | 1762652580.002475 | 0.39330199426410817 | bfloat16 | MistralForCausalLM | 7.242 |
| allknowingroger/MultiMash-12B-slerp | ed27cd90-e73f-4432-aed9-dd36f29cba1a | 1762652579.991891 | 1762652579.9918919 | 0.39744876926554873 | bfloat16 | MixtralForCausalLM | 12.879 |
| allknowingroger/MultiMash2-12B-slerp | af52a422-e959-4662-98e8-c94fa83bee3e | 1762652579.992556 | 1762652579.992556 | 0.42607503645881817 | bfloat16 | MixtralForCausalLM | 12.879 |
| allknowingroger/Multimash3-12B-slerp | 80aa0629-7ea1-4f69-b302-c0502abcbbab | 1762652579.994557 | 1762652579.994557 | 0.44371046600796993 | bfloat16 | MixtralForCausalLM | 12.879 |
| allknowingroger/HomerSlerp4-7B | 988da677-c00d-4e7c-847e-6ca553e0124b | 1762652579.988936 | 1762652579.988937 | 0.43741605606457534 | bfloat16 | Qwen2ForCausalLM | 7.616 |
| allknowingroger/Ph3task2-14B | 5d818d86-2caf-4b29-9c15-8fa27217de22 | 1762652579.99728 | 1762652579.997281 | 0.4713127834146731 | float16 | Phi3ForCausalLM | 13.96 |
| allknowingroger/MultiMash9-13B-slerp | 6a0f5973-6377-4707-a0e3-414ca1f22b32 | 1762652579.994061 | 1762652579.994061 | 0.4187810564856802 | bfloat16 | MixtralForCausalLM | 12.879 |
HF Open LLM v2
allknowingroger
allknowingroger/Ph3task1-14B
718ef6de-5926-4a4c-bade-9a162ce8e730
0.0.1
hfopenllm_v2/allknowingroger_Ph3task1-14B/1762652579.997059
1762652579.99706
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Ph3task1-14B
allknowingroger/Ph3task1-14B
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46946435457918323}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 13.96}
HF Open LLM v2
allknowingroger
allknowingroger/MixTaoTruthful-13B-slerp
003c05a1-abb7-41d3-a264-efc6923b64ef
0.0.1
hfopenllm_v2/allknowingroger_MixTaoTruthful-13B-slerp/1762652579.991453
1762652579.991454
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/MixTaoTruthful-13B-slerp
allknowingroger/MixTaoTruthful-13B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41388515804731446}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 12.879}
HF Open LLM v2
allknowingroger
allknowingroger/Neuralcoven-7B-slerp
ba46f82b-2129-43db-ae21-09e6576dc4e6
0.0.1
hfopenllm_v2/allknowingroger_Neuralcoven-7B-slerp/1762652579.995681
1762652579.995682
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Neuralcoven-7B-slerp
allknowingroger/Neuralcoven-7B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3858584112377381}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
allknowingroger
allknowingroger/Yi-blossom-40B
b35eaca2-0f77-4171-bbcf-23a191b055f2
0.0.1
hfopenllm_v2/allknowingroger_Yi-blossom-40B/1762652580.004046
1762652580.0040479
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Yi-blossom-40B
allknowingroger/Yi-blossom-40B
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.20088587170928693}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 18.769}
HF Open LLM v2
allknowingroger
allknowingroger/Meme-7B-slerp
8eaa7d3f-0217-4ed3-9367-9e0f9c0926fe
0.0.1
hfopenllm_v2/allknowingroger_Meme-7B-slerp/1762652579.9900281
1762652579.990029
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Meme-7B-slerp
allknowingroger/Meme-7B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5163754393897082}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
allknowingroger
allknowingroger/Strangecoven-7B-slerp
f125c8d1-57f3-4b79-ace4-2104b008a507
0.0.1
hfopenllm_v2/allknowingroger_Strangecoven-7B-slerp/1762652580.002888
1762652580.002889
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Strangecoven-7B-slerp
allknowingroger/Strangecoven-7B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37464261492839}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH",...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
allknowingroger
allknowingroger/Chocolatine-24B
9d3d89f9-e792-4b33-91d1-41f84ca1cc68
0.0.1
hfopenllm_v2/allknowingroger_Chocolatine-24B/1762652579.9856288
1762652579.98563
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Chocolatine-24B
allknowingroger/Chocolatine-24B
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19581488229010136}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 24.184}
HF Open LLM v2
allknowingroger
allknowingroger/MultiMerge-7B-slerp
f0aae363-f838-48c8-bf9e-b8e9f0e84a24
0.0.1
hfopenllm_v2/allknowingroger_MultiMerge-7B-slerp/1762652579.994297
1762652579.994299
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/MultiMerge-7B-slerp
allknowingroger/MultiMerge-7B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3947758613811354}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
allknowingroger
allknowingroger/WestlakeMaziyar-7B-slerp
2db948db-a9e5-41cf-9567-2f9198d80900
0.0.1
hfopenllm_v2/allknowingroger_WestlakeMaziyar-7B-slerp/1762652580.003291
1762652580.0032918
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/WestlakeMaziyar-7B-slerp
allknowingroger/WestlakeMaziyar-7B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48377748817581795}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
allknowingroger
allknowingroger/Ph3task3-14B
a935c0d1-6623-45c6-a100-96c8b5a3a2fb
0.0.1
hfopenllm_v2/allknowingroger_Ph3task3-14B/1762652579.997498
1762652579.997499
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Ph3task3-14B
allknowingroger/Ph3task3-14B
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4962421929369628}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 13.96}
HF Open LLM v2
allknowingroger
allknowingroger/MultiverseEx26-7B-slerp
30b74d3f-7247-4c93-9c94-dc8beba14b70
0.0.1
hfopenllm_v2/allknowingroger_MultiverseEx26-7B-slerp/1762652579.995038
1762652579.995039
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/MultiverseEx26-7B-slerp
allknowingroger/MultiverseEx26-7B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3938516469633905}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
allknowingroger
allknowingroger/Yibuddy-35B
dc2688b9-9dff-4a2e-b3d8-3bdc82634d20
0.0.1
hfopenllm_v2/allknowingroger_Yibuddy-35B/1762652580.004411
1762652580.004412
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Yibuddy-35B
allknowingroger/Yibuddy-35B
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4234774841864032}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 34.389}
HF Open LLM v2
allknowingroger
allknowingroger/Ph3merge-14B
95228f47-8fb1-443c-8ad4-0021504e34e0
0.0.1
hfopenllm_v2/allknowingroger_Ph3merge-14B/1762652579.996419
1762652579.9964201
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Ph3merge-14B
allknowingroger/Ph3merge-14B
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27012881376968667}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "Phi3ForCausalLM", "params_billions": 13.619}
HF Open LLM v2
allknowingroger
allknowingroger/Yi-1.5-34B
98455065-72e1-4dad-bce1-1c3ceddf5433
0.0.1
hfopenllm_v2/allknowingroger_Yi-1.5-34B/1762652580.0036852
1762652580.003686
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Yi-1.5-34B
allknowingroger/Yi-1.5-34B
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16391618682872555}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 34.389}
HF Open LLM v2
allknowingroger
allknowingroger/limyClown-7B-slerp
420f8334-c420-4b8f-8853-fea8f4f5ac6d
0.0.1
hfopenllm_v2/allknowingroger_limyClown-7B-slerp/1762652580.005876
1762652580.005877
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/limyClown-7B-slerp
allknowingroger/limyClown-7B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4017451473202215}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
allknowingroger
allknowingroger/Ministral-8B-slerp
effba194-3b2a-4847-9708-e3cb62a7c964
0.0.1
hfopenllm_v2/allknowingroger_Ministral-8B-slerp/1762652579.990243
1762652579.9902442
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Ministral-8B-slerp
allknowingroger/Ministral-8B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19608970863974257}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.248}
HF Open LLM v2
allknowingroger
allknowingroger/Ph3della5-14B
d5a47313-b2f5-4833-9539-b8f56e4a5fda
0.0.1
hfopenllm_v2/allknowingroger_Ph3della5-14B/1762652579.9961941
1762652579.996195
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Ph3della5-14B
allknowingroger/Ph3della5-14B
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47985567183960776}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "Phi3ForCausalLM", "params_billions": 13.96}
HF Open LLM v2
allknowingroger
allknowingroger/Marco-01-slerp1-7B
1b8abf32-6b66-4e9b-9b82-e1978d07a483
0.0.1
hfopenllm_v2/allknowingroger_Marco-01-slerp1-7B/1762652579.989768
1762652579.98977
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Marco-01-slerp1-7B
allknowingroger/Marco-01-slerp1-7B
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46811571075856506}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
HF Open LLM v2
allknowingroger
allknowingroger/MultiMash8-13B-slerp
54a836bc-8048-4c2b-a65a-937acc2fa414
0.0.1
hfopenllm_v2/allknowingroger_MultiMash8-13B-slerp/1762652579.9938078
1762652579.99381
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/MultiMash8-13B-slerp
allknowingroger/MultiMash8-13B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4320702402957486}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 12.879}
HF Open LLM v2
allknowingroger
allknowingroger/HomerSlerp3-7B
a8a69b0c-02c9-437d-975d-69f1ddc6959a
0.0.1
hfopenllm_v2/allknowingroger_HomerSlerp3-7B/1762652579.988729
1762652579.9887302
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/HomerSlerp3-7B
allknowingroger/HomerSlerp3-7B
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4362668829815999}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
HF Open LLM v2
allknowingroger
allknowingroger/MultiMash5-12B-slerp
df7621bc-5af2-45c5-b8e4-ebc158dad966
0.0.1
hfopenllm_v2/allknowingroger_MultiMash5-12B-slerp/1762652579.992772
1762652579.992772
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/MultiMash5-12B-slerp
allknowingroger/MultiMash5-12B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41415998439695567}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 12.879}
HF Open LLM v2
allknowingroger
allknowingroger/Ph3unsloth-3B-slerp
0a9be33a-792e-413c-b60d-3e97a060fa78
0.0.1
hfopenllm_v2/allknowingroger_Ph3unsloth-3B-slerp/1762652579.99772
1762652579.99772
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Ph3unsloth-3B-slerp
allknowingroger/Ph3unsloth-3B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18944511673470835}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 3.821}
HF Open LLM v2
allknowingroger
allknowingroger/Yislerp2-34B
ce55aca1-80bd-4711-ad05-d812d206bd14
0.0.1
hfopenllm_v2/allknowingroger_Yislerp2-34B/1762652580.005196
1762652580.005197
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Yislerp2-34B
allknowingroger/Yislerp2-34B
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39994658616914236}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 34.389}
HF Open LLM v2
allknowingroger
allknowingroger/Yunconglong-13B-slerp
8ae47af1-5ae6-4cb9-ac94-8d70fda5126d
0.0.1
hfopenllm_v2/allknowingroger_Yunconglong-13B-slerp/1762652580.005601
1762652580.005603
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/Yunconglong-13B-slerp
allknowingroger/Yunconglong-13B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42417673993891764}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 12.879}
HF Open LLM v2
allknowingroger
allknowingroger/MultiMash7-12B-slerp
141507b5-67df-4c38-9eeb-b9d3cf98b08f
0.0.1
hfopenllm_v2/allknowingroger_MultiMash7-12B-slerp/1762652579.993205
1762652579.993206
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
allknowingroger/MultiMash7-12B-slerp
allknowingroger/MultiMash7-12B-slerp
allknowingroger
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42127887338927383}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 12.879}
HF Open LLM v2
raphgg
raphgg/test-2.5-72B
133866e4-6e3a-4d88-95f3-d7e1bd414988
0.0.1
hfopenllm_v2/raphgg_test-2.5-72B/1762652580.489263
1762652580.489265
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
raphgg/test-2.5-72B
raphgg/test-2.5-72B
raphgg
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8437047035199936}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 72.706}
HF Open LLM v2
uukuguy
uukuguy/speechless-zephyr-code-functionary-7b
82346a60-f31e-45ba-9fae-bd738321f390
0.0.1
hfopenllm_v2/uukuguy_speechless-zephyr-code-functionary-7b/1762652580.583915
1762652580.583916
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
uukuguy/speechless-zephyr-code-functionary-7b
uukuguy/speechless-zephyr-code-functionary-7b
uukuguy
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2695791610704043}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
uukuguy
uukuguy/speechless-coder-ds-6.7b
a3ba5a65-b137-42ad-868b-9aa5c24afd07
0.0.1
hfopenllm_v2/uukuguy_speechless-coder-ds-6.7b/1762652580.582827
1762652580.582828
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
uukuguy/speechless-coder-ds-6.7b
uukuguy/speechless-coder-ds-6.7b
uukuguy
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25046986440422525}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 6.7}
HF Open LLM v2
uukuguy
uukuguy/speechless-instruct-mistral-7b-v0.2
e115938d-d343-4c03-8f3b-4d86768b2e49
0.0.1
hfopenllm_v2/uukuguy_speechless-instruct-mistral-7b-v0.2/1762652580.5831082
1762652580.5831091
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
uukuguy/speechless-instruct-mistral-7b-v0.2
uukuguy/speechless-instruct-mistral-7b-v0.2
uukuguy
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3261324397044287}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH...
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242}
HF Open LLM v2
LLM4Binary
LLM4Binary/llm4decompile-1.3b-v2
86f0a81b-69da-4f36-a6b0-8a36f79d5c1c
0.0.1
hfopenllm_v2/LLM4Binary_llm4decompile-1.3b-v2/1762652579.7068748
1762652579.706877
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
LLM4Binary/llm4decompile-1.3b-v2
LLM4Binary/llm4decompile-1.3b-v2
LLM4Binary
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22678936333373229}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.346}
HF Open LLM v2
odyssey-labs
odyssey-labs/Astral-1-10B
4fefa5ae-d421-4883-b734-d6cc8bd8f4d6
0.0.1
hfopenllm_v2/odyssey-labs_Astral-1-10B/1762652580.417092
1762652580.417093
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
HF Open LLM v2
leaderboard
Hugging Face
null
null
third_party
odyssey-labs/Astral-1-10B
odyssey-labs/Astral-1-10B
odyssey-labs
unknown
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38780657544204933}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BB...
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 10.732}