| _leaderboard (stringclasses 1) | _developer (stringclasses 559) | _model (stringlengths 9 102) | _uuid (stringlengths 36 36) | schema_version (stringclasses 1) | evaluation_id (stringlengths 35 133) | retrieved_timestamp (stringlengths 13 18) | source_data (stringclasses 1) | evaluation_source_name (stringclasses 1) | evaluation_source_type (stringclasses 1) | source_organization_name (stringclasses 1) | source_organization_url (null) | source_organization_logo_url (null) | evaluator_relationship (stringclasses 1) | model_name (stringlengths 4 102) | model_id (stringlengths 9 102) | model_developer (stringclasses 559) | model_inference_platform (stringclasses 1) | evaluation_results (stringlengths 1.35k 1.41k) | additional_details (stringclasses 660) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
HF Open LLM v2 | jeonsworld | jeonsworld/CarbonVillain-en-10.7B-v4 | bd67084e-d9ca-43c4-ab6e-3fbe8a1fb782 | 0.0.1 | hfopenllm_v2/jeonsworld_CarbonVillain-en-10.7B-v4/1762652580.2876348 | 1762652580.287636 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | jeonsworld/CarbonVillain-en-10.7B-v4 | jeonsworld/CarbonVillain-en-10.7B-v4 | jeonsworld | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45792386423578324}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.516795955873779}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04682779456193353}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3062080536912752}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3965416666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31416223404255317}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 10.732} |
HF Open LLM v2 | Kukedlc | Kukedlc/NeuralSynthesis-7B-v0.3 | b3412f38-d0bc-47c9-a750-14bdbf4e65d8 | 0.0.1 | hfopenllm_v2/Kukedlc_NeuralSynthesis-7B-v0.3/1762652579.702864 | 1762652579.702865 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Kukedlc/NeuralSynthesis-7B-v0.3 | Kukedlc/NeuralSynthesis-7B-v0.3 | Kukedlc | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4078400865259733}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5138078814382175}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07779456193353475}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2802013422818792}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4345833333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30501994680851063}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | Kukedlc | Kukedlc/NeuralSynthesis-7B-v0.1 | 3d2603e3-d556-48e8-ba94-555faf9f1807 | 0.0.1 | hfopenllm_v2/Kukedlc_NeuralSynthesis-7B-v0.1/1762652579.7026482 | 1762652579.702649 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Kukedlc/NeuralSynthesis-7B-v0.1 | Kukedlc/NeuralSynthesis-7B-v0.1 | Kukedlc | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4184563624516283}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5144745481048844}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0634441087613293}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28104026845637586}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43328125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.304936835106383}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | Kukedlc | Kukedlc/NeuralSynthesis-7b-v0.4-slerp | 4e30bf00-f6b7-4c28-8cf8-dc64427fb958 | 0.0.1 | hfopenllm_v2/Kukedlc_NeuralSynthesis-7b-v0.4-slerp/1762652579.7030761 | 1762652579.703077 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Kukedlc/NeuralSynthesis-7b-v0.4-slerp | Kukedlc/NeuralSynthesis-7b-v0.4-slerp | Kukedlc | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3947259936967247}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5142932549151301}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06268882175226587}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27768456375838924}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43324999999999997}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3042719414893617}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | Kukedlc | Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5 | 4775e169-e3a7-41b6-bf1e-a7e8e0edb4fc | 0.0.1 | hfopenllm_v2/Kukedlc_NeuralExperiment-7b-MagicCoder-v7.5/1762652579.701928 | 1762652579.7019289 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5 | Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5 | Kukedlc | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4552509563513699}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3988446544778517}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06646525679758308}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2961409395973154}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4281979166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2824135638297872}}] | {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | thirdeyeai | thirdeyeai/elevate360m | 013a9bf9-7b9e-4084-b7a2-bb77ad0c18e1 | 0.0.1 | hfopenllm_v2/thirdeyeai_elevate360m/1762652580.565248 | 1762652580.565249 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | thirdeyeai/elevate360m | thirdeyeai/elevate360m | thirdeyeai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.04448862351892978}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2962583602962783}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.015861027190332326}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2407718120805369}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34621875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1077127659574468}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 0.362} |
HF Open LLM v2 | jieliu | jieliu/Storm-7B | f521cb33-487e-4636-9039-fe1af3e090f2 | 0.0.1 | hfopenllm_v2/jieliu_Storm-7B/1762652580.288308 | 1762652580.288309 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | jieliu/Storm-7B | jieliu/Storm-7B | jieliu | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3424192254329623}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5187285371254579}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06117824773413897}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30788590604026844}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4428958333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3119182180851064}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | neopolita | neopolita/jessi-v0.2-falcon3-10b-instruct | 4a73436e-e2b7-4c03-b4b2-80d0ed8e389a | 0.0.1 | hfopenllm_v2/neopolita_jessi-v0.2-falcon3-10b-instruct/1762652580.390252 | 1762652580.390252 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | neopolita/jessi-v0.2-falcon3-10b-instruct | neopolita/jessi-v0.2-falcon3-10b-instruct | neopolita | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7768099753099553}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6204846671314362}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2122356495468278}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3288590604026846}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42813541666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4354222074468085}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 10.306} |
HF Open LLM v2 | neopolita | neopolita/jessi-v0.4-falcon3-7b-instruct | 514b1b8c-d80a-4851-afec-e04968b2e733 | 0.0.1 | hfopenllm_v2/neopolita_jessi-v0.4-falcon3-7b-instruct/1762652580.39086 | 1762652580.390861 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | neopolita/jessi-v0.4-falcon3-7b-instruct | neopolita/jessi-v0.4-falcon3-7b-instruct | neopolita | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7603735865281896}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5521668757306609}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3768882175226586}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3028523489932886}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49712500000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40043218085106386}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 7.456} |
HF Open LLM v2 | neopolita | neopolita/jessi-v0.1-bf16-falcon3-7b-instruct | 5063eae6-e8f3-41c6-ab11-cfcc4a0a0cf3 | 0.0.1 | hfopenllm_v2/neopolita_jessi-v0.1-bf16-falcon3-7b-instruct/1762652580.389358 | 1762652580.389359 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | neopolita/jessi-v0.1-bf16-falcon3-7b-instruct | neopolita/jessi-v0.1-bf16-falcon3-7b-instruct | neopolita | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7527050448365891}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5516128933222162}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3806646525679758}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3028523489932886}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48248958333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3923703457446808}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 7.456} |
HF Open LLM v2 | neopolita | neopolita/jessi-v0.3-falcon3-7b-instruct | 95281cbf-6f27-4e17-b21f-9a0604d5629b | 0.0.1 | hfopenllm_v2/neopolita_jessi-v0.3-falcon3-7b-instruct/1762652580.390663 | 1762652580.3906639 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | neopolita/jessi-v0.3-falcon3-7b-instruct | neopolita/jessi-v0.3-falcon3-7b-instruct | neopolita | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7509064836855099}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.538793502664194}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18882175226586104}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3196308724832215}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46915625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3970246010638298}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 7.456} |
HF Open LLM v2 | neopolita | neopolita/jessi-v0.5-falcon3-7b-instruct | 6736897b-390a-4c19-8a04-9b606c1705b1 | 0.0.1 | hfopenllm_v2/neopolita_jessi-v0.5-falcon3-7b-instruct/1762652580.391073 | 1762652580.391074 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | neopolita/jessi-v0.5-falcon3-7b-instruct | neopolita/jessi-v0.5-falcon3-7b-instruct | neopolita | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7411645544931892}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5589627302276082}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37386706948640486}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.311241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48652083333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3966090425531915}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 7.456} |
HF Open LLM v2 | neopolita | neopolita/loki-v0.1-virtuoso | 907047d7-1767-4009-8e04-02f5dc366355 | 0.0.1 | hfopenllm_v2/neopolita_loki-v0.1-virtuoso/1762652580.3914938 | 1762652580.391495 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | neopolita/loki-v0.1-virtuoso | neopolita/loki-v0.1-virtuoso | neopolita | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7819308324135517}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6467251502613163}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3391238670694864}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35067114093959734}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43753125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5128823138297872}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.77} |
HF Open LLM v2 | neopolita | neopolita/jessi-v0.1-virtuoso-small | b4630d14-950d-4dbf-8897-74d46dd51130 | 0.0.1 | hfopenllm_v2/neopolita_jessi-v0.1-virtuoso-small/1762652580.3900428 | 1762652580.3900428 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | neopolita/jessi-v0.1-virtuoso-small | neopolita/jessi-v0.1-virtuoso-small | neopolita | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7959192719761344}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6442861439957068}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33987915407854985}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33053691275167785}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43616666666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5129654255319149}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.77} |
HF Open LLM v2 | neopolita | neopolita/jessi-v0.2-falcon3-7b-instruct | bd8025f1-66d4-4644-af1b-ca5366a32964 | 0.0.1 | hfopenllm_v2/neopolita_jessi-v0.2-falcon3-7b-instruct/1762652580.39046 | 1762652580.39046 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | neopolita/jessi-v0.2-falcon3-7b-instruct | neopolita/jessi-v0.2-falcon3-7b-instruct | neopolita | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5770754930251731}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5363079188886575}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2537764350453172}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31711409395973156}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44788541666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3904587765957447}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 7.456} |
HF Open LLM v2 | neopolita | neopolita/jessi-v0.1-qwen2.5-7b-instruct | 9b1f077d-5893-417c-ac87-1d0beb39b750 | 0.0.1 | hfopenllm_v2/neopolita_jessi-v0.1-qwen2.5-7b-instruct/1762652580.3898308 | 1762652580.3898308 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | neopolita/jessi-v0.1-qwen2.5-7b-instruct | neopolita/jessi-v0.1-qwen2.5-7b-instruct | neopolita | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7326715337526651}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5292315105257686}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4086102719033233}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29697986577181207}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3913645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42278922872340424}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
HF Open LLM v2 | neopolita | neopolita/jessi-v0.1-falcon3-10b-instruct | c2ee0925-6e4a-4d3b-80be-b8b98156e3db | 0.0.1 | hfopenllm_v2/neopolita_jessi-v0.1-falcon3-10b-instruct/1762652580.389616 | 1762652580.389617 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | neopolita/jessi-v0.1-falcon3-10b-instruct | neopolita/jessi-v0.1-falcon3-10b-instruct | neopolita | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.755152994055772}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5952883626256132}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2001510574018127}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3187919463087248}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42785416666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4187998670212766}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 10.306} |
HF Open LLM v2 | neopolita | neopolita/jessi-v0.6-falcon3-7b-instruct | 5b934386-a0e9-437d-bf9e-a51074415a1e | 0.0.1 | hfopenllm_v2/neopolita_jessi-v0.6-falcon3-7b-instruct/1762652580.391277 | 1762652580.391277 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | neopolita/jessi-v0.6-falcon3-7b-instruct | neopolita/jessi-v0.6-falcon3-7b-instruct | neopolita | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7401904723910335}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5508818723957883}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3564954682779456}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30033557046979864}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49042708333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3956948138297872}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 7.456} |
HF Open LLM v2 | iRyanBell | iRyanBell/ARC1 | 62f9b47d-2860-44b3-8abb-3d441f4bdeb4 | 0.0.1 | hfopenllm_v2/iRyanBell_ARC1/1762652580.204204 | 1762652580.204204 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | iRyanBell/ARC1 | iRyanBell/ARC1 | iRyanBell | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.441112913735555}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4902999658144703}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.06873111782477341}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29446308724832215}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3990520833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3371010638297872}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | iRyanBell | iRyanBell/ARC1-II | 19afc23f-5849-4147-b240-9bb7ddea4d58 | 0.0.1 | hfopenllm_v2/iRyanBell_ARC1-II/1762652580.204559 | 1762652580.204561 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | iRyanBell/ARC1-II | iRyanBell/ARC1-II | iRyanBell | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17083560508340093}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33817781029884353}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.02190332326283988}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27181208053691275}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4912916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1685505319148936}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | gradientai | gradientai/Llama-3-8B-Instruct-Gradient-1048k | 79d366fc-e21c-4e5e-bb94-8d221d9df715 | 0.0.1 | hfopenllm_v2/gradientai_Llama-3-8B-Instruct-Gradient-1048k/1762652580.181334 | 1762652580.181335 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | gradientai/Llama-3-8B-Instruct-Gradient-1048k | gradientai/Llama-3-8B-Instruct-Gradient-1048k | gradientai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4455588948434598}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4345903107069573}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05362537764350453}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27768456375838924}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42975}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29404920212765956}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/pre-cursa-o1-v1.6 | 50627b31-a8d4-401a-8449-5f33cfb17893 | 0.0.1 | hfopenllm_v2/marcuscedricridia_pre-cursa-o1-v1.6/1762652580.340074 | 1762652580.340075 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/pre-cursa-o1-v1.6 | marcuscedricridia/pre-cursa-o1-v1.6 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7527549125209998}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5473342320067097}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32046979865771813}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4233645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44132313829787234}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/pre-cursa-o1-v1.3 | f86cf126-4fb3-4419-82bf-e5c0168e25cb | 0.0.1 | hfopenllm_v2/marcuscedricridia_pre-cursa-o1-v1.3/1762652580.339683 | 1762652580.339684 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/pre-cursa-o1-v1.3 | marcuscedricridia/pre-cursa-o1-v1.3 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7506815250202795}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5454519705653261}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5075528700906344}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31291946308724833}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42714583333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4419880319148936}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/stray-r1o-et | cbf68d01-b993-4bcd-b174-23e3b6e28d3a | 0.0.1 | hfopenllm_v2/marcuscedricridia_stray-r1o-et/1762652580.340682 | 1762652580.340683 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/stray-r1o-et | marcuscedricridia/stray-r1o-et | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15622215720953736}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2967459956151434}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.004531722054380665}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26174496644295303}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4085729166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.109375}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/post-cursa-o1 | c9632855-db4e-40bb-b140-2ff524d31fd2 | 0.0.1 | hfopenllm_v2/marcuscedricridia_post-cursa-o1/1762652580.3390641 | 1762652580.339065 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/post-cursa-o1 | marcuscedricridia/post-cursa-o1 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7628215357473725}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5479692437233474}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4871601208459215}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30956375838926176}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43514583333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4360871010638298}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/r1o-et | 84de36db-b427-40c4-80f6-2114c8ad4e4f | 0.0.1 | hfopenllm_v2/marcuscedricridia_r1o-et/1762652580.340277 | 1762652580.340277 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/r1o-et | marcuscedricridia/r1o-et | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3596800932636516}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42092007019831174}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07930513595166164}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2726510067114094}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3579375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2579787234042553}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/Cheng-2 | dbadece3-665b-423b-b2d9-e74d7c676133 | 0.0.1 | hfopenllm_v2/marcuscedricridia_Cheng-2/1762652580.332486 | 1762652580.3324869 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/Cheng-2 | marcuscedricridia/Cheng-2 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8337378156624423}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6498988582965893}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5438066465256798}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34563758389261745}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41933333333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5013297872340425}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/cursa-o1-7b-2-28-2025 | 2a0bcf8c-cf70-4d13-a713-67054bc98412 | 0.0.1 | hfopenllm_v2/marcuscedricridia_cursa-o1-7b-2-28-2025/1762652580.3360791 | 1762652580.3360798 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/cursa-o1-7b-2-28-2025 | marcuscedricridia/cursa-o1-7b-2-28-2025 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7467098409996586}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.538413713363387}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4811178247734139}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3070469798657718}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42733333333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4365026595744681}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/Cheng-1 | 7aa1c718-9ac6-426b-be50-5c7f37849b90 | 0.0.1 | hfopenllm_v2/marcuscedricridia_Cheng-1/1762652580.332221 | 1762652580.332222 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/Cheng-1 | marcuscedricridia/Cheng-1 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7788833628106757}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5524677845280024}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48942598187311176}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2961409395973154}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4073333333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43492353723404253}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/olmner-7b | 5064ebea-3ec3-4344-867f-e33f8937d096 | 0.0.1 | hfopenllm_v2/marcuscedricridia_olmner-7b/1762652580.338225 | 1762652580.338225 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/olmner-7b | marcuscedricridia/olmner-7b | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7253775537795273}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5471591805569388}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46299093655589124}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30788590604026844}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43796875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4309341755319149}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/etr1o-explicit-v1.1 | 02fe0385-223e-4578-b3fb-d6819f783861 | 0.0.1 | hfopenllm_v2/marcuscedricridia_etr1o-explicit-v1.1/1762652580.337136 | 1762652580.337137 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/etr1o-explicit-v1.1 | marcuscedricridia/etr1o-explicit-v1.1 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28803906966847964}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31316553135589525}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.004531722054380665}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27768456375838924}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4110520833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11951462765957446}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/olmner-o1-7b | b1669ad9-450f-4a93-8094-26f427beb49f | 0.0.1 | hfopenllm_v2/marcuscedricridia_olmner-o1-7b/1762652580.338658 | 1762652580.338659 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/olmner-o1-7b | marcuscedricridia/olmner-o1-7b | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7527549125209998}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5480873056178129}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49244712990936557}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3011744966442953}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42990625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43858045212765956}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/pre-cursa-o1 | 51fc3a16-67c2-448b-9854-07ab8adc4dea | 0.0.1 | hfopenllm_v2/marcuscedricridia_pre-cursa-o1/1762652580.3392608 | 1762652580.339262 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/pre-cursa-o1 | marcuscedricridia/pre-cursa-o1 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.740889728143548}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5461688442794247}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5037764350453172}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30956375838926176}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42596875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4424035904255319}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/pre-cursa-o1-v1.4 | 4ed1f68a-6bc9-4621-beb1-3d274247cdb6 | 0.0.1 | hfopenllm_v2/marcuscedricridia_pre-cursa-o1-v1.4/1762652580.3398788 | 1762652580.33988 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/pre-cursa-o1-v1.4 | marcuscedricridia/pre-cursa-o1-v1.4 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.748783228500379}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5493014138981462}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48338368580060426}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3053691275167785}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42851041666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4435671542553192}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/Cheng-2-v1.1 | a720e9bc-e8dd-4b7a-8d22-7b9f4b42ebe0 | 0.0.1 | hfopenllm_v2/marcuscedricridia_Cheng-2-v1.1/1762652580.332704 | 1762652580.332705 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/Cheng-2-v1.1 | marcuscedricridia/Cheng-2-v1.1 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8269934883885868}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6510142192324059}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5392749244712991}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34312080536912754}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41672916666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5076462765957447}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/olmner-della-7b | 062e407e-7820-459f-83da-b670f8adff9d | 0.0.1 | hfopenllm_v2/marcuscedricridia_olmner-della-7b/1762652580.338445 | 1762652580.3384461 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/olmner-della-7b | marcuscedricridia/olmner-della-7b | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7636958824807067}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5491231851969524}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4962235649546828}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3011744966442953}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4207604166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43858045212765956}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/absolute-o1-7b | 4e9eef3d-b851-41de-a3b2-88950f1d426f | 0.0.1 | hfopenllm_v2/marcuscedricridia_absolute-o1-7b/1762652580.335638 | 1762652580.335639 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/absolute-o1-7b | marcuscedricridia/absolute-o1-7b | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7515558717536137}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5469413884153854}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5083081570996979}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3196308724832215}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4113645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44132313829787234}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/cursa-o1-7b | 0f7f339a-5523-4551-ba77-4fe34779d017 | 0.0.1 | hfopenllm_v2/marcuscedricridia_cursa-o1-7b/1762652580.335863 | 1762652580.335863 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/cursa-o1-7b | marcuscedricridia/cursa-o1-7b | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7628215357473725}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5465860023973769}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4954682779456193}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3070469798657718}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4300625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4392453457446808}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/pre-cursa-o1-v1.2 | 9db3b6b0-7cc8-48b6-85f5-1662cad07fae | 0.0.1 | hfopenllm_v2/marcuscedricridia_pre-cursa-o1-v1.2/1762652580.339467 | 1762652580.339468 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/pre-cursa-o1-v1.2 | marcuscedricridia/pre-cursa-o1-v1.2 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7548781677061308}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5486788313377599}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.506797583081571}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31291946308724833}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42723958333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4402426861702128}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/fan-o1-7b | 9693b68f-ac5c-4111-804c-0505ec8bf06d | 0.0.1 | hfopenllm_v2/marcuscedricridia_fan-o1-7b/1762652580.338023 | 1762652580.338024 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/fan-o1-7b | marcuscedricridia/fan-o1-7b | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4455588948434598}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4849058892394324}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16163141993957703}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28439597315436244}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3833645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3273769946808511}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/etr1o-explicit-v1.2 | 3ec5106d-86be-48a8-bb3d-6574b6971641 | 0.0.1 | hfopenllm_v2/marcuscedricridia_etr1o-explicit-v1.2/1762652580.337388 | 1762652580.337389 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/etr1o-explicit-v1.2 | marcuscedricridia/etr1o-explicit-v1.2 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1504020443534267}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29497368605886115}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2609060402684564}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40311458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11261635638297872}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/etr1o-v1.1 | cd68d6d9-a5c7-4f32-b372-0e954af830ad | 0.0.1 | hfopenllm_v2/marcuscedricridia_etr1o-v1.1/1762652580.3376079 | 1762652580.337609 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/etr1o-v1.1 | marcuscedricridia/etr1o-v1.1 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15971954414287426}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31003625778742805}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25671140939597314}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40165625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11569148936170212}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/etr1o-v1.2 | 81b5a281-9dc6-4ae5-8079-d0e308a20c8e | 0.0.1 | hfopenllm_v2/marcuscedricridia_etr1o-v1.2/1762652580.337824 | 1762652580.337825 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/etr1o-v1.2 | marcuscedricridia/etr1o-v1.2 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7286998497320443}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6349035922791185}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35876132930513593}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37583892617449666}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4714479166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5315824468085106}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/olmner-sbr-7b | afb014ed-a2e6-46b9-9ee9-a6a1f52e43cf | 0.0.1 | hfopenllm_v2/marcuscedricridia_olmner-sbr-7b/1762652580.338864 | 1762652580.3388648 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/olmner-sbr-7b | marcuscedricridia/olmner-sbr-7b | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7600488924941378}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5461642048146724}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4947129909365559}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3087248322147651}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4153645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4412400265957447}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/cursorr-o1.2-7b | 51cd189c-82a8-4475-8df5-9a855394274a | 0.0.1 | hfopenllm_v2/marcuscedricridia_cursorr-o1.2-7b/1762652580.336929 | 1762652580.336929 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/cursorr-o1.2-7b | marcuscedricridia/cursorr-o1.2-7b | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1659895743294459}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3068134113454804}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25419463087248323}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35384375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10804521276595745}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/sbr-o1-7b | 05666c00-3b8c-48f3-9e36-bc9a116bb0c6 | 0.0.1 | hfopenllm_v2/marcuscedricridia_sbr-o1-7b/1762652580.340477 | 1762652580.340478 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/sbr-o1-7b | marcuscedricridia/sbr-o1-7b | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7454609325478618}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5478826565229475}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4984894259818731}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3104026845637584}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4404166666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43550531914893614}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/cursa-o1-7b-v1.1 | f24a1f02-da21-49f0-91b9-65df4fd770db | 0.0.1 | hfopenllm_v2/marcuscedricridia_cursa-o1-7b-v1.1/1762652580.336299 | 1762652580.3363001 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/cursa-o1-7b-v1.1 | marcuscedricridia/cursa-o1-7b-v1.1 | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7527549125209998}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5492557305346194}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4984894259818731}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3070469798657718}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.425875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43916223404255317}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/cursor-o1-7b | 764c4dcb-caea-418c-b206-ee401ea0d979 | 0.0.1 | hfopenllm_v2/marcuscedricridia_cursor-o1-7b/1762652580.3367229 | 1762652580.336724 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/cursor-o1-7b | marcuscedricridia/cursor-o1-7b | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4106880853912065}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5007453242508472}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14123867069486404}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28104026845637586}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41009375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32513297872340424}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
HF Open LLM v2 | marcuscedricridia | marcuscedricridia/cursa-o1-7b-v1.2-normalize-false | 2632f42e-cbe3-4c55-b434-f4a239aeffa4 | 0.0.1 | hfopenllm_v2/marcuscedricridia_cursa-o1-7b-v1.2-normalize-false/1762652580.3365178 | 1762652580.3365178 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | marcuscedricridia/cursa-o1-7b-v1.2-normalize-false | marcuscedricridia/cursa-o1-7b-v1.2-normalize-false | marcuscedricridia | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7615726272955757}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5492349810703803}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49924471299093653}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3070469798657718}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4272708333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4435671542553192}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.613} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-3M-0625-Llama3-70B | 73eb53bc-a090-4415-8fdc-a767a2e00188 | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-3M-0625-Llama3-70B/1762652579.4887528 | 1762652579.488755 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-3M-0625-Llama3-70B | BAAI/Infinity-Instruct-3M-0625-Llama3-70B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7442120240960651}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6670337872930245}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22507552870090636}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3573825503355705}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46165625000000005}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4586103723404255}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 70.554} |
HF Open LLM v2 | BAAI | BAAI/OPI-Llama-3.1-8B-Instruct | 567f27f3-3f64-4054-aa67-684c29e4d71a | 0.0.1 | hfopenllm_v2/BAAI_OPI-Llama-3.1-8B-Instruct/1762652579.490996 | 1762652579.490996 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/OPI-Llama-3.1-8B-Instruct | BAAI/OPI-Llama-3.1-8B-Instruct | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.20745510800232272}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3551224419497605}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.013595166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27432885906040266}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3233020833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21243351063829788}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-3M-0613-Mistral-7B | 9d9ac91a-f339-41a4-ae91-3dba41b06382 | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-3M-0613-Mistral-7B/1762652579.48831 | 1762652579.4883142 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-3M-0613-Mistral-7B | BAAI/Infinity-Instruct-3M-0613-Mistral-7B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5319873491225504}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49582333763258896}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08157099697885196}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2961409395973154}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4350833333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31607380319148937}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-7M-0729-Llama3_1-8B | eace7f56-b853-436d-a744-bfdb9e227993 | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-7M-0729-Llama3_1-8B/1762652579.489912 | 1762652579.489913 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-7M-0729-Llama3_1-8B | BAAI/Infinity-Instruct-7M-0729-Llama3_1-8B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6131952109292234}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5077335431381055}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12764350453172205}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29278523489932884}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35784375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3223902925531915}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-3M-0625-Mistral-7B | be3423f2-98f0-414a-b0c3-efd0d60d4cb3 | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-3M-0625-Mistral-7B/1762652579.489246 | 1762652579.489247 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-3M-0625-Mistral-7B | BAAI/Infinity-Instruct-3M-0625-Mistral-7B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5867420666054957}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4939670574681802}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07628398791540786}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28691275167785235}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42723958333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3229720744680851}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B | 8a2d5e9c-7d41-4638-8b8c-58d08fc0912b | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-3M-0625-Yi-1.5-9B/1762652579.489686 | 1762652579.489687 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B | BAAI/Infinity-Instruct-3M-0625-Yi-1.5-9B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5185984299436606}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5509115146247398}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16389728096676737}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3540268456375839}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45753125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41181848404255317}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.829} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-7M-Gen-mistral-7B | 51daf5e7-1d4e-4753-b24b-79273e6f9370 | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-7M-Gen-mistral-7B/1762652579.490771 | 1762652579.490772 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-7M-Gen-mistral-7B | BAAI/Infinity-Instruct-7M-Gen-mistral-7B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6146690780462506}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4963813586525743}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08308157099697885}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2902684563758389}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4061875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3273769946808511}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-7M-Gen-Llama3_1-70B | b04b4e4d-2f15-446b-974f-21f72fd80fe0 | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-7M-Gen-Llama3_1-70B/1762652579.490346 | 1762652579.490347 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-7M-Gen-Llama3_1-70B | BAAI/Infinity-Instruct-7M-Gen-Llama3_1-70B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7335458804859993}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6695200461367471}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25226586102719034}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37583892617449666}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45390625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.460688164893617}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-3M-0613-Llama3-70B | 69cea95c-c167-42f4-a233-f7739f86f6a7 | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-3M-0613-Llama3-70B/1762652579.487831 | 1762652579.487832 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-3M-0613-Llama3-70B | BAAI/Infinity-Instruct-3M-0613-Llama3-70B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6821134589555713}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6641614484348598}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21525679758308158}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35822147651006714}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45226041666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47298869680851063}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 70.554} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-3M-0625-Qwen2-7B | 2390d668-3273-4f58-a0fd-b13b9d9b1651 | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-3M-0625-Qwen2-7B/1762652579.489471 | 1762652579.489472 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-3M-0625-Qwen2-7B | BAAI/Infinity-Instruct-3M-0625-Qwen2-7B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5553930238434022}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5345911997776569}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19259818731117825}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31291946308724833}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38876041666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39602726063829785}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-7M-Gen-Llama3_1-8B | 84f2027c-3e68-489e-902b-2fec6ec8f850 | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-7M-Gen-Llama3_1-8B/1762652579.4905548 | 1762652579.490556 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-7M-Gen-Llama3_1-8B | BAAI/Infinity-Instruct-7M-Gen-Llama3_1-8B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6131952109292234}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5077335431381055}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12764350453172205}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29278523489932884}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35784375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3223902925531915}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-3M-0625-Llama3-8B | 00d87824-732a-4746-8d9f-ce7b1f45c0ae | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-3M-0625-Llama3-8B/1762652579.4890082 | 1762652579.489009 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-3M-0625-Llama3-8B | BAAI/Infinity-Instruct-3M-0625-Llama3-8B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6050268842227512}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4954985723563075}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08836858006042296}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2751677852348993}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37120833333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3252160904255319}}] | {"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | BAAI | BAAI/Infinity-Instruct-7M-0729-mistral-7B | 25477dff-04c5-4cb8-9ad9-3a13448a2a7d | 0.0.1 | hfopenllm_v2/BAAI_Infinity-Instruct-7M-0729-mistral-7B/1762652579.490131 | 1762652579.490131 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | BAAI/Infinity-Instruct-7M-0729-mistral-7B | BAAI/Infinity-Instruct-7M-0729-mistral-7B | BAAI | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6161928128476886}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4963813586525743}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08308157099697885}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2902684563758389}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4061875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3273769946808511}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | Ro-xe | Ro-xe/FMixIA-FrankenMerge-9.5B-PT-9 | 0d1c7e5e-4ddf-447b-9581-c62cedc2fedc | 0.0.1 | hfopenllm_v2/Ro-xe_FMixIA-FrankenMerge-9.5B-PT-9/1762652579.8542862 | 1762652579.8542871 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Ro-xe/FMixIA-FrankenMerge-9.5B-PT-9 | Ro-xe/FMixIA-FrankenMerge-9.5B-PT-9 | Ro-xe | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19401632113902223}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5087851148631056}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0030211480362537764}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30788590604026844}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41703124999999996}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36569148936170215}}] | {"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.141} |
HF Open LLM v2 | Ro-xe | Ro-xe/FMixIA-7B-DARE-0 | 93930443-dc12-422f-9920-470917ef8d7d | 0.0.1 | hfopenllm_v2/Ro-xe_FMixIA-7B-DARE-0/1762652579.8536398 | 1762652579.853641 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Ro-xe/FMixIA-7B-DARE-0 | Ro-xe/FMixIA-7B-DARE-0 | Ro-xe | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3341256754300811}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5035332799973222}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.052870090634441085}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28942953020134227}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45448958333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3016123670212766}}] | {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | Ro-xe | Ro-xe/FMixIA-7B-TIES-1 | b5d64806-0d01-4c99-9ba6-6aff88c894bd | 0.0.1 | hfopenllm_v2/Ro-xe_FMixIA-7B-TIES-1/1762652579.8540852 | 1762652579.8540852 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Ro-xe/FMixIA-7B-TIES-1 | Ro-xe/FMixIA-7B-TIES-1 | Ro-xe | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34529160405501846}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5091539642456672}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05664652567975831}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28859060402684567}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46890625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2992021276595745}}] | {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | Ro-xe | Ro-xe/FMixIA-7B-SLERP-27 | 7f08546a-3f05-4612-879c-3f293daeabd4 | 0.0.1 | hfopenllm_v2/Ro-xe_FMixIA-7B-SLERP-27/1762652579.853882 | 1762652579.8538828 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Ro-xe/FMixIA-7B-SLERP-27 | Ro-xe/FMixIA-7B-SLERP-27 | Ro-xe | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3765409114482905}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5150591725181265}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0634441087613293}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2953020134228188}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44115624999999997}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30078125}}] | {"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | Aryanne | Aryanne/SHBA | a1c56b87-d8d4-4570-9c33-b84dd066d92f | 0.0.1 | hfopenllm_v2/Aryanne_SHBA/1762652579.482961 | 1762652579.482962 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Aryanne/SHBA | Aryanne/SHBA | Aryanne | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7816560060639104}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5233174837035715}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1797583081570997}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3053691275167785}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41613541666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3892121010638298}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | Aryanne | Aryanne/SuperHeart | c6fae489-9bf8-40e5-a602-1c6ce9000537 | 0.0.1 | hfopenllm_v2/Aryanne_SuperHeart/1762652579.483199 | 1762652579.4832 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | Aryanne/SuperHeart | Aryanne/SuperHeart | Aryanne | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5192234382549413}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5215375046264326}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15634441087613293}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3011744966442953}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44357291666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3912067819148936}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | jaredjoss | jaredjoss/pythia-410m-roberta-lr_8e7-kl_01-steps_12000-rlhf-model | cf6b0824-45c4-4b47-bf23-e5df5673b74e | 0.0.1 | hfopenllm_v2/jaredjoss_pythia-410m-roberta-lr_8e7-kl_01-steps_12000-rlhf-model/1762652580.230787 | 1762652580.230787 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | jaredjoss/pythia-410m-roberta-lr_8e7-kl_01-steps_12000-rlhf-model | jaredjoss/pythia-410m-roberta-lr_8e7-kl_01-steps_12000-rlhf-model | jaredjoss | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15722172723928066}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2863444769655102}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25922818791946306}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3606979166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11685505319148937}}] | {"precision": "float16", "architecture": "GPTNeoXForCausalLM", "params_billions": 0.407} |
HF Open LLM v2 | allura-org | allura-org/MoE-Girl-1BA-7BT | 5b3176a0-7ded-409a-bc54-70e0ecf9b325 | 0.0.1 | hfopenllm_v2/allura-org_MoE-Girl-1BA-7BT/1762652580.0080209 | 1762652580.008022 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | allura-org/MoE-Girl-1BA-7BT | allura-org/MoE-Girl-1BA-7BT | allura-org | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27050337548814923}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3139175363262408}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.015105740181268883}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25838926174496646}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34355208333333337}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12175864361702128}}] | {"precision": "bfloat16", "architecture": "OlmoeForCausalLM", "params_billions": 6.919} |
HF Open LLM v2 | allura-org | allura-org/TQ2.5-14B-Neon-v1 | 68bdab24-8324-4190-abd2-ad3ad5a7a853 | 0.0.1 | hfopenllm_v2/allura-org_TQ2.5-14B-Neon-v1/1762652580.0085812 | 1762652580.0085819 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | allura-org/TQ2.5-14B-Neon-v1 | allura-org/TQ2.5-14B-Neon-v1 | allura-org | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6754189993661264}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.655304131044165}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36027190332326287}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3716442953020134}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.461}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5252659574468085}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.77} |
HF Open LLM v2 | allura-org | allura-org/MS-Meadowlark-22B | 7ea2cf22-114f-449c-a9cf-c4f379646cd3 | 0.0.1 | hfopenllm_v2/allura-org_MS-Meadowlark-22B/1762652580.007196 | 1762652580.007197 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | allura-org/MS-Meadowlark-22B | allura-org/MS-Meadowlark-22B | allura-org | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.669698621878837}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5162576933217772}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18353474320241692}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32550335570469796}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3842604166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38231382978723405}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 22.247} |
HF Open LLM v2 | allura-org | allura-org/TQ2.5-14B-Aletheia-v1 | b46bef60-b37b-4510-a92a-fb4c0cabb357 | 0.0.1 | hfopenllm_v2/allura-org_TQ2.5-14B-Aletheia-v1/1762652580.008265 | 1762652580.008276 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | allura-org/TQ2.5-14B-Aletheia-v1 | allura-org/TQ2.5-14B-Aletheia-v1 | allura-org | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7530297388706411}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6585074769185942}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33987915407854985}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3624161073825503}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44515625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5241023936170213}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.77} |
HF Open LLM v2 | allura-org | allura-org/Teleut-7b | 85ceb275-787a-4dbc-981a-513fd16606ea | 0.0.1 | hfopenllm_v2/allura-org_Teleut-7b/1762652580.008814 | 1762652580.008814 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | allura-org/Teleut-7b | allura-org/Teleut-7b | allura-org | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6378752820294595}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5141277814496585}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24093655589123866}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3263422818791946}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4640416666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4130651595744681}}] | {"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616} |
HF Open LLM v2 | allura-org | allura-org/L3.1-8b-RP-Ink | cb8c45ae-1be6-4ab0-9317-cfbfc8850dc4 | 0.0.1 | hfopenllm_v2/allura-org_L3.1-8b-RP-Ink/1762652580.006678 | 1762652580.006679 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | allura-org/L3.1-8b-RP-Ink | allura-org/L3.1-8b-RP-Ink | allura-org | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7811063533646281}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48284724308518095}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14803625377643503}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26426174496644295}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3608229166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3427526595744681}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | allura-org | allura-org/MN-12b-RP-Ink | 3dc6cdf9-e75d-4f9f-9b91-9592e70566f8 | 0.0.1 | hfopenllm_v2/allura-org_MN-12b-RP-Ink/1762652580.006974 | 1762652580.006975 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | allura-org/MN-12b-RP-Ink | allura-org/MN-12b-RP-Ink | allura-org | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7186332265056716}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4833826588550261}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11858006042296072}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28523489932885904}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38184375000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3513962765957447}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 12.248} |
HF Open LLM v2 | openchat | openchat/openchat-3.5-1210 | 6b3c8f0b-25ed-4ae3-be89-a91815091de0 | 0.0.1 | hfopenllm_v2/openchat_openchat-3.5-1210/1762652580.430838 | 1762652580.430839 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | openchat/openchat-3.5-1210 | openchat/openchat-3.5-1210 | openchat | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.603678240402133}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4535356846447984}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07854984894259819}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3011744966442953}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4414375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3142453457446808}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | openchat | openchat/openchat_3.5 | c2d66fd5-6c95-4b8e-b87f-c8f0ae00271a | 0.0.1 | hfopenllm_v2/openchat_openchat_3.5/1762652580.431262 | 1762652580.431263 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | openchat/openchat_3.5 | openchat/openchat_3.5 | openchat | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5931118321608887}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44263196862832893}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07250755287009064}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2986577181208054}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4228645833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31532579787234044}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.0} |
HF Open LLM v2 | openchat | openchat/openchat_v3.2_super | b7b3fcb7-bbc7-4f39-9daa-7a54362d5d68 | 0.0.1 | hfopenllm_v2/openchat_openchat_v3.2_super/1762652580.431961 | 1762652580.431962 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | openchat/openchat_v3.2_super | openchat/openchat_v3.2_super | openchat | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2861906408329898}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42212089838803973}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.021148036253776436}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26426174496644295}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41613541666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24251994680851063}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 13.0} |
HF Open LLM v2 | openchat | openchat/openchat-3.6-8b-20240522 | 2305b9e7-1c2b-42d7-b306-802e32d53e0f | 0.0.1 | hfopenllm_v2/openchat_openchat-3.6-8b-20240522/1762652580.4310489 | 1762652580.43105 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | openchat/openchat-3.6-8b-20240522 | openchat/openchat-3.6-8b-20240522 | openchat | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5343355629729118}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5338412089001999}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09969788519637462}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3179530201342282}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3998541666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32288896276595747}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | openchat | openchat/openchat_v3.2 | 2ee1a517-ef52-469e-ac5d-f14e3d72c87c | 0.0.1 | hfopenllm_v2/openchat_openchat_v3.2/1762652580.431712 | 1762652580.431714 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | openchat/openchat_v3.2 | openchat/openchat_v3.2 | openchat | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2980558252104416}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4330564283474314}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.01283987915407855}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2701342281879195}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.433625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2421875}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 13.0} |
HF Open LLM v2 | openchat | openchat/openchat-3.5-0106 | 51cd5c94-7c87-4758-aadc-46acf20ab4b0 | 0.0.1 | hfopenllm_v2/openchat_openchat-3.5-0106/1762652580.430586 | 1762652580.4305868 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | openchat/openchat-3.5-0106 | openchat/openchat-3.5-0106 | openchat | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5966590867786362}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46169787083960595}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07628398791540786}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30788590604026844}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42543749999999997}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3291223404255319}}] | {"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 7.242} |
HF Open LLM v2 | SultanR | SultanR/SmolTulu-1.7b-Instruct | 1b0bd686-fd26-441f-b280-97b10bb1449c | 0.0.1 | hfopenllm_v2/SultanR_SmolTulu-1.7b-Instruct/1762652579.893334 | 1762652579.893334 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | SultanR/SmolTulu-1.7b-Instruct | SultanR/SmolTulu-1.7b-Instruct | SultanR | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6540867121459949}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3713086260572204}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07930513595166164}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26929530201342283}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35403125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17104388297872342}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.711} |
HF Open LLM v2 | SultanR | SultanR/SmolTulu-1.7b-Reinforced | 224b4cbc-e36c-4f68-9918-edbdaf947191 | 0.0.1 | hfopenllm_v2/SultanR_SmolTulu-1.7b-Reinforced/1762652579.893585 | 1762652579.893586 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | SultanR/SmolTulu-1.7b-Reinforced | SultanR/SmolTulu-1.7b-Reinforced | SultanR | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6790659893526954}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3551868188444029}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07175226586102719}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.276006711409396}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34060416666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17627992021276595}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.711} |
HF Open LLM v2 | SultanR | SultanR/SmolTulu-1.7b-it-v0 | 22ea218a-e3be-4e05-9a94-af716bb3a624 | 0.0.1 | hfopenllm_v2/SultanR_SmolTulu-1.7b-it-v0/1762652579.8938031 | 1762652579.8938031 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | SultanR/SmolTulu-1.7b-it-v0 | SultanR/SmolTulu-1.7b-it-v0 | SultanR | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6540867121459949}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3713086260572204}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.07930513595166164}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26929530201342283}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35403125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17104388297872342}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.711} |
HF Open LLM v2 | moeru-ai | moeru-ai/L3.1-Moe-4x8B-v0.2 | e6fe5591-f6aa-40c6-897f-f90084682109 | 0.0.1 | hfopenllm_v2/moeru-ai_L3.1-Moe-4x8B-v0.2/1762652580.372139 | 1762652580.37214 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | moeru-ai/L3.1-Moe-4x8B-v0.2 | moeru-ai/L3.1-Moe-4x8B-v0.2 | moeru-ai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5406554608438943}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.446625675582615}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10347432024169184}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26677852348993286}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3233958333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27626329787234044}}] | {"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 24.942} |
HF Open LLM v2 | moeru-ai | moeru-ai/L3.1-Moe-4x8B-v0.1 | bbcae028-046e-4e87-b991-5d7b92c42cc2 | 0.0.1 | hfopenllm_v2/moeru-ai_L3.1-Moe-4x8B-v0.1/1762652580.371937 | 1762652580.371938 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | moeru-ai/L3.1-Moe-4x8B-v0.1 | moeru-ai/L3.1-Moe-4x8B-v0.1 | moeru-ai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.433219413378724}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49392781736367014}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1299093655589124}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25922818791946306}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3609166666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34541223404255317}}] | {"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 24.942} |
HF Open LLM v2 | moeru-ai | moeru-ai/L3.1-Moe-2x8B-v0.2 | cf47622f-c921-4610-adef-bed2a4670249 | 0.0.1 | hfopenllm_v2/moeru-ai_L3.1-Moe-2x8B-v0.2/1762652580.371698 | 1762652580.3716989 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | moeru-ai/L3.1-Moe-2x8B-v0.2 | moeru-ai/L3.1-Moe-2x8B-v0.2 | moeru-ai | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7347947889377962}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5255688392585965}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16993957703927492}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30033557046979864}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41985416666666664}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38580452127659576}}] | {"precision": "bfloat16", "architecture": "MixtralForCausalLM", "params_billions": 13.668} |
HF Open LLM v2 | DreadPoor | DreadPoor/Derivative_V2-8B-Model_Stock | 3320dceb-b5ef-4267-81d3-b6fe2a415eee | 0.0.1 | hfopenllm_v2/DreadPoor_Derivative_V2-8B-Model_Stock/1762652579.5701172 | 1762652579.570118 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/Derivative_V2-8B-Model_Stock | DreadPoor/Derivative_V2-8B-Model_Stock | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7536791269387447}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5392643954415269}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1797583081570997}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3070469798657718}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41229166666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38563829787234044}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | DreadPoor | DreadPoor/Yearn_V3-8B-Model_Stock | 763eec85-4395-43b6-aa79-9ecb024eb7af | 0.0.1 | hfopenllm_v2/DreadPoor_Yearn_V3-8B-Model_Stock/1762652579.587668 | 1762652579.587669 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/Yearn_V3-8B-Model_Stock | DreadPoor/Yearn_V3-8B-Model_Stock | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7289746760816855}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5322019394938072}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18957703927492447}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3053691275167785}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3908958333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3801529255319149}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | DreadPoor | DreadPoor/Incidental-8B-Model_Stock | 102ed90e-cbe3-4219-b9c6-cec82c78941f | 0.0.1 | hfopenllm_v2/DreadPoor_Incidental-8B-Model_Stock/1762652579.573979 | 1762652579.5739799 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/Incidental-8B-Model_Stock | DreadPoor/Incidental-8B-Model_Stock | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.748183708116686}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5452070612873019}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16163141993957703}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3028523489932886}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42401041666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3873005319148936}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | DreadPoor | DreadPoor/ichor_1.1-8B-Model_Stock | 64afccfe-af45-4c26-878a-eb01b56f3524 | 0.0.1 | hfopenllm_v2/DreadPoor_ichor_1.1-8B-Model_Stock/1762652579.589439 | 1762652579.589439 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/ichor_1.1-8B-Model_Stock | DreadPoor/ichor_1.1-8B-Model_Stock | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8096328851890761}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.528067770617839}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17749244712990936}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3062080536912752}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4067708333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3855551861702128}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | DreadPoor | DreadPoor/Nother_One-8B-Model_Stock | 464f363d-ab94-4cac-8846-fbcf25be3dec | 0.0.1 | hfopenllm_v2/DreadPoor_Nother_One-8B-Model_Stock/1762652579.578036 | 1762652579.578037 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/Nother_One-8B-Model_Stock | DreadPoor/Nother_One-8B-Model_Stock | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6863101016414226}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5204527600425481}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15181268882175228}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28942953020134227}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38702083333333337}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35945811170212766}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | DreadPoor | DreadPoor/remember_to_breathe-8b-Model-Stock | 76309e63-a135-45cf-9f06-b091215726d0 | 0.0.1 | hfopenllm_v2/DreadPoor_remember_to_breathe-8b-Model-Stock/1762652579.5907981 | 1762652579.590799 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/remember_to_breathe-8b-Model-Stock | DreadPoor/remember_to_breathe-8b-Model-Stock | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7104150321147887}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5411654435599922}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1487915407854985}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3011744966442953}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4144583333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37608045212765956}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | DreadPoor | DreadPoor/L3.1-BaeZel-8B-Della | 6c7dfbaf-648e-4c4a-907f-8639ab1c7312 | 0.0.1 | hfopenllm_v2/DreadPoor_L3.1-BaeZel-8B-Della/1762652579.575009 | 1762652579.57501 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/L3.1-BaeZel-8B-Della | DreadPoor/L3.1-BaeZel-8B-Della | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5180243974875552}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5448449542185521}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17447129909365558}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3196308724832215}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4199791666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3902094414893617}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | DreadPoor | DreadPoor/Lava_Lamp-8B-SLERP | 26d89e91-7f52-4913-a4e0-3275cca1d8d7 | 0.0.1 | hfopenllm_v2/DreadPoor_Lava_Lamp-8B-SLERP/1762652579.575455 | 1762652579.575455 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/Lava_Lamp-8B-SLERP | DreadPoor/Lava_Lamp-8B-SLERP | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7381170848903134}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5367586873360172}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17371601208459214}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3053691275167785}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4187083333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.375}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | DreadPoor | DreadPoor/Oh_Boy-8B-LINEAR | 393ad85d-6b8b-466d-99e0-6a89bf0ce66e | 0.0.1 | hfopenllm_v2/DreadPoor_Oh_Boy-8B-LINEAR/1762652579.5791628 | 1762652579.5791638 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/Oh_Boy-8B-LINEAR | DreadPoor/Oh_Boy-8B-LINEAR | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7503069633018169}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5375114406292553}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1782477341389728}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30788590604026844}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4107708333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3848902925531915}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | DreadPoor | DreadPoor/Derivative-8B-Model_Stock | 9ef7e716-8638-46ac-a455-f601c1cfddc1 | 0.0.1 | hfopenllm_v2/DreadPoor_Derivative-8B-Model_Stock/1762652579.569859 | 1762652579.56986 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/Derivative-8B-Model_Stock | DreadPoor/Derivative-8B-Model_Stock | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7667433520835827}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5395493987763994}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17900302114803626}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31711409395973156}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42004166666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3810671542553192}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | DreadPoor | DreadPoor/Heart_Stolen-ALT-8B-Model_Stock | 141d8908-50cb-4457-a0f0-93d55d1c705b | 0.0.1 | hfopenllm_v2/DreadPoor_Heart_Stolen-ALT-8B-Model_Stock/1762652579.573096 | 1762652579.573097 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/Heart_Stolen-ALT-8B-Model_Stock | DreadPoor/Heart_Stolen-ALT-8B-Model_Stock | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7183584001560305}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.526338467747489}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15634441087613293}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3011744966442953}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40549999999999997}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37724401595744683}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
HF Open LLM v2 | DreadPoor | DreadPoor/Aspire_V2.1-8B-Model_Stock | 292e77cb-e6e6-4d10-9956-1e09369e9669 | 0.0.1 | hfopenllm_v2/DreadPoor_Aspire_V2.1-8B-Model_Stock/1762652579.564126 | 1762652579.564127 | ["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"] | HF Open LLM v2 | leaderboard | Hugging Face | null | null | third_party | DreadPoor/Aspire_V2.1-8B-Model_Stock | DreadPoor/Aspire_V2.1-8B-Model_Stock | DreadPoor | unknown | [{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7237540836092679}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5236395810818485}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17673716012084592}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30956375838926176}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41359375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3800698138297872}}] | {"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03} |
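Each row above stores its benchmark results in the `evaluation_results` column as a JSON-encoded list of entries, each carrying an `evaluation_name`, a `metric_config`, and a `score_details.score`. As a minimal sketch (assuming the column is available as a JSON string after loading the dataset; the helper name and the unweighted mean are illustrative, not part of the dataset), one might extract per-benchmark scores like this:

```python
import json

def summarize_row(evaluation_results_json: str) -> dict:
    """Map each benchmark name in an `evaluation_results` JSON string to its score,
    plus a simple unweighted mean across benchmarks (illustrative aggregate only)."""
    results = json.loads(evaluation_results_json)
    scores = {
        entry["evaluation_name"]: entry["score_details"]["score"]
        for entry in results
    }
    scores["mean"] = sum(scores.values()) / len(scores)
    return scores

# Example with a truncated fragment of the moeru-ai/L3.1-Moe-2x8B-v0.2 row:
example = '[{"evaluation_name": "IFEval", "score_details": {"score": 0.7347947889377962}}]'
print(summarize_row(example))
```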