{
"schema_version": "0.2.0",
"evaluation_id": "helm_lite/microsoft_phi-2/1770834614.1822479",
"retrieved_timestamp": "1770834614.1822479",
"source_metadata": {
"source_name": "helm_lite",
"source_type": "documentation",
"source_organization_name": "crfm",
"evaluator_relationship": "third_party"
},
"model_info": {
"name": "Phi-2",
"id": "microsoft/phi-2",
"developer": "microsoft",
"inference_platform": "unknown"
},
"evaluation_results": [
{
"evaluation_name": "Mean win rate",
"source_data": {
"dataset_name": "helm_lite",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/lite/benchmark_output/releases/v1.13.0/groups/core_scenarios.json"
]
},
"metric_config": {
"evaluation_description": "How many models this model outperforms on average (over columns).",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 1.0
},
"score_details": {
"score": 0.169,
"details": {
"tab": "Accuracy",
"Mean win rate - Efficiency": {
"description": null,
"tab": "Efficiency",
"score": 0.9032709113607991
},
"Mean win rate - General information": {
"description": null,
"tab": "General information",
"score": null
}
}
},
"generation_config": {
"additional_details": {}
}
},
{
"evaluation_name": "NarrativeQA",
"source_data": {
"dataset_name": "NarrativeQA",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/lite/benchmark_output/releases/v1.13.0/groups/core_scenarios.json"
]
},
"metric_config": {
"evaluation_description": "F1 on NarrativeQA",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 1.0
},
"score_details": {
"score": 0.703,
"details": {
"description": "min=0.703, mean=0.703, max=0.703, sum=0.703 (1)",
"tab": "Accuracy",
"NarrativeQA - Observed inference time (s)": {
"description": "min=0.493, mean=0.493, max=0.493, sum=0.493 (1)",
"tab": "Efficiency",
"score": 0.49325697791408485
},
"NarrativeQA - # eval": {
"description": "min=355, mean=355, max=355, sum=355 (1)",
"tab": "General information",
"score": 355.0
},
"NarrativeQA - # train": {
"description": "min=2.085, mean=2.085, max=2.085, sum=2.085 (1)",
"tab": "General information",
"score": 2.084507042253521
},
"NarrativeQA - truncated": {
"description": "min=0, mean=0, max=0, sum=0 (1)",
"tab": "General information",
"score": 0.0
},
"NarrativeQA - # prompt tokens": {
"description": "min=1705.006, mean=1705.006, max=1705.006, sum=1705.006 (1)",
"tab": "General information",
"score": 1705.0056338028169
},
"NarrativeQA - # output tokens": {
"description": "min=1, mean=1, max=1, sum=1 (1)",
"tab": "General information",
"score": 1.0
}
}
},
"generation_config": {
"additional_details": {}
}
},
{
"evaluation_name": "NaturalQuestions (closed-book)",
"source_data": {
"dataset_name": "NaturalQuestions (closed-book)",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/lite/benchmark_output/releases/v1.13.0/groups/core_scenarios.json"
]
},
"metric_config": {
"evaluation_description": "F1 on NaturalQuestions (closed-book)",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 1.0
},
"score_details": {
"score": 0.155,
"details": {
"description": "min=0.155, mean=0.155, max=0.155, sum=0.155 (1)",
"tab": "Accuracy",
"NaturalQuestions (open-book) - Observed inference time (s)": {
"description": "min=0.47, mean=0.47, max=0.47, sum=0.47 (1)",
"tab": "Efficiency",
"score": 0.46984758591651915
},
"NaturalQuestions (closed-book) - Observed inference time (s)": {
"description": "min=0.292, mean=0.292, max=0.292, sum=0.292 (1)",
"tab": "Efficiency",
"score": 0.29179329943656923
},
"NaturalQuestions (open-book) - # eval": {
"description": "min=1000, mean=1000, max=1000, sum=1000 (1)",
"tab": "General information",
"score": 1000.0
},
"NaturalQuestions (open-book) - # train": {
"description": "min=4.706, mean=4.706, max=4.706, sum=4.706 (1)",
"tab": "General information",
"score": 4.706
},
"NaturalQuestions (open-book) - truncated": {
"description": "min=0.036, mean=0.036, max=0.036, sum=0.036 (1)",
"tab": "General information",
"score": 0.036
},
"NaturalQuestions (open-book) - # prompt tokens": {
"description": "min=1493.994, mean=1493.994, max=1493.994, sum=1493.994 (1)",
"tab": "General information",
"score": 1493.994
},
"NaturalQuestions (open-book) - # output tokens": {
"description": "min=1, mean=1, max=1, sum=1 (1)",
"tab": "General information",
"score": 1.0
},
"NaturalQuestions (closed-book) - # eval": {
"description": "min=1000, mean=1000, max=1000, sum=1000 (1)",
"tab": "General information",
"score": 1000.0
},
"NaturalQuestions (closed-book) - # train": {
"description": "min=5, mean=5, max=5, sum=5 (1)",
"tab": "General information",
"score": 5.0
},
"NaturalQuestions (closed-book) - truncated": {
"description": "min=0, mean=0, max=0, sum=0 (1)",
"tab": "General information",
"score": 0.0
},
"NaturalQuestions (closed-book) - # prompt tokens": {
"description": "min=116.254, mean=116.254, max=116.254, sum=116.254 (1)",
"tab": "General information",
"score": 116.254
},
"NaturalQuestions (closed-book) - # output tokens": {
"description": "min=1, mean=1, max=1, sum=1 (1)",
"tab": "General information",
"score": 1.0
}
}
},
"generation_config": {
"additional_details": {
"mode": "closedbook"
}
}
},
{
"evaluation_name": "OpenbookQA",
"source_data": {
"dataset_name": "OpenbookQA",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/lite/benchmark_output/releases/v1.13.0/groups/core_scenarios.json"
]
},
"metric_config": {
"evaluation_description": "EM on OpenbookQA",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 1.0
},
"score_details": {
"score": 0.798,
"details": {
"description": "min=0.798, mean=0.798, max=0.798, sum=0.798 (1)",
"tab": "Accuracy",
"OpenbookQA - Observed inference time (s)": {
"description": "min=0.262, mean=0.262, max=0.262, sum=0.262 (1)",
"tab": "Efficiency",
"score": 0.2615062308311462
},
"OpenbookQA - # eval": {
"description": "min=500, mean=500, max=500, sum=500 (1)",
"tab": "General information",
"score": 500.0
},
"OpenbookQA - # train": {
"description": "min=5, mean=5, max=5, sum=5 (1)",
"tab": "General information",
"score": 5.0
},
"OpenbookQA - truncated": {
"description": "min=0, mean=0, max=0, sum=0 (1)",
"tab": "General information",
"score": 0.0
},
"OpenbookQA - # prompt tokens": {
"description": "min=254.216, mean=254.216, max=254.216, sum=254.216 (1)",
"tab": "General information",
"score": 254.216
},
"OpenbookQA - # output tokens": {
"description": "min=1, mean=1, max=1, sum=1 (1)",
"tab": "General information",
"score": 1.0
}
}
},
"generation_config": {
"additional_details": {
"dataset": "openbookqa",
"method": "multiple_choice_joint"
}
}
},
{
"evaluation_name": "MMLU",
"source_data": {
"dataset_name": "MMLU",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/lite/benchmark_output/releases/v1.13.0/groups/core_scenarios.json"
]
},
"metric_config": {
"evaluation_description": "EM on MMLU",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 1.0
},
"score_details": {
"score": 0.518,
"details": {
"description": "min=0.31, mean=0.518, max=0.78, sum=2.592 (5)",
"tab": "Accuracy",
"MMLU - Observed inference time (s)": {
"description": "min=0.27, mean=0.285, max=0.295, sum=1.426 (5)",
"tab": "Efficiency",
"score": 0.28525047320650343
},
"MMLU - # eval": {
"description": "min=100, mean=102.8, max=114, sum=514 (5)",
"tab": "General information",
"score": 102.8
},
"MMLU - # train": {
"description": "min=5, mean=5, max=5, sum=25 (5)",
"tab": "General information",
"score": 5.0
},
"MMLU - truncated": {
"description": "min=0, mean=0, max=0, sum=0 (5)",
"tab": "General information",
"score": 0.0
},
"MMLU - # prompt tokens": {
"description": "min=371.38, mean=472.274, max=624.07, sum=2361.37 (5)",
"tab": "General information",
"score": 472.2740350877192
},
"MMLU - # output tokens": {
"description": "min=1, mean=1, max=1, sum=5 (5)",
"tab": "General information",
"score": 1.0
}
}
},
"generation_config": {
"additional_details": {
"subject": [
"abstract_algebra",
"college_chemistry",
"computer_security",
"econometrics",
"us_foreign_policy"
],
"method": "multiple_choice_joint"
}
}
},
{
"evaluation_name": "MATH",
"source_data": {
"dataset_name": "MATH",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/lite/benchmark_output/releases/v1.13.0/groups/core_scenarios.json"
]
},
"metric_config": {
"evaluation_description": "Equivalent (CoT) on MATH",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 1.0
},
"score_details": {
"score": 0.255,
"details": {
"description": "min=0.033, mean=0.255, max=0.465, sum=1.786 (7)",
"tab": "Accuracy",
"MATH - Observed inference time (s)": {
"description": "min=0.923, mean=1.129, max=1.577, sum=7.902 (7)",
"tab": "Efficiency",
"score": 1.1288332585709453
},
"MATH - # eval": {
"description": "min=30, mean=62.429, max=135, sum=437 (7)",
"tab": "General information",
"score": 62.42857142857143
},
"MATH - # train": {
"description": "min=2.962, mean=6.916, max=8, sum=48.409 (7)",
"tab": "General information",
"score": 6.915558126084441
},
"MATH - truncated": {
"description": "min=0, mean=0, max=0, sum=0 (7)",
"tab": "General information",
"score": 0.0
},
"MATH - # prompt tokens": {
"description": "min=906.541, mean=1162.126, max=1511.442, sum=8134.881 (7)",
"tab": "General information",
"score": 1162.1258475895563
},
"MATH - # output tokens": {
"description": "min=1, mean=1, max=1, sum=7 (7)",
"tab": "General information",
"score": 1.0
}
}
},
"generation_config": {
"additional_details": {
"subject": [
"algebra",
"counting_and_probability",
"geometry",
"intermediate_algebra",
"number_theory",
"prealgebra",
"precalculus"
],
"level": "1",
"use_official_examples": "False",
"use_chain_of_thought": "True"
}
}
},
{
"evaluation_name": "GSM8K",
"source_data": {
"dataset_name": "GSM8K",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/lite/benchmark_output/releases/v1.13.0/groups/core_scenarios.json"
]
},
"metric_config": {
"evaluation_description": "EM on GSM8K",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 1.0
},
"score_details": {
"score": 0.581,
"details": {
"description": "min=0.581, mean=0.581, max=0.581, sum=0.581 (1)",
"tab": "Accuracy",
"GSM8K - Observed inference time (s)": {
"description": "min=1.147, mean=1.147, max=1.147, sum=1.147 (1)",
"tab": "Efficiency",
"score": 1.1468114259243012
},
"GSM8K - # eval": {
"description": "min=1000, mean=1000, max=1000, sum=1000 (1)",
"tab": "General information",
"score": 1000.0
},
"GSM8K - # train": {
"description": "min=5, mean=5, max=5, sum=5 (1)",
"tab": "General information",
"score": 5.0
},
"GSM8K - truncated": {
"description": "min=0, mean=0, max=0, sum=0 (1)",
"tab": "General information",
"score": 0.0
},
"GSM8K - # prompt tokens": {
"description": "min=938.893, mean=938.893, max=938.893, sum=938.893 (1)",
"tab": "General information",
"score": 938.893
},
"GSM8K - # output tokens": {
"description": "min=1, mean=1, max=1, sum=1 (1)",
"tab": "General information",
"score": 1.0
}
}
},
"generation_config": {
"additional_details": {}
}
},
{
"evaluation_name": "LegalBench",
"source_data": {
"dataset_name": "LegalBench",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/lite/benchmark_output/releases/v1.13.0/groups/core_scenarios.json"
]
},
"metric_config": {
"evaluation_description": "EM on LegalBench",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 1.0
},
"score_details": {
"score": 0.334,
"details": {
"description": "min=0.137, mean=0.334, max=0.537, sum=1.672 (5)",
"tab": "Accuracy",
"LegalBench - Observed inference time (s)": {
"description": "min=0.268, mean=0.303, max=0.381, sum=1.517 (5)",
"tab": "Efficiency",
"score": 0.3034723702962031
},
"LegalBench - # eval": {
"description": "min=95, mean=409.4, max=1000, sum=2047 (5)",
"tab": "General information",
"score": 409.4
},
"LegalBench - # train": {
"description": "min=0.337, mean=3.867, max=5, sum=19.337 (5)",
"tab": "General information",
"score": 3.8673469387755106
},
"LegalBench - truncated": {
"description": "min=0, mean=0.003, max=0.014, sum=0.014 (5)",
"tab": "General information",
"score": 0.002857142857142857
},
"LegalBench - # prompt tokens": {
"description": "min=205.632, mean=566.249, max=1519.543, sum=2831.243 (5)",
"tab": "General information",
"score": 566.2485439511586
},
"LegalBench - # output tokens": {
"description": "min=1, mean=1, max=1, sum=5 (5)",
"tab": "General information",
"score": 1.0
}
}
},
"generation_config": {
"additional_details": {
"subset": [
"abercrombie",
"corporate_lobbying",
"function_of_decision_section",
"international_citizenship_questions",
"proa"
]
}
}
},
{
"evaluation_name": "MedQA",
"source_data": {
"dataset_name": "MedQA",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/lite/benchmark_output/releases/v1.13.0/groups/core_scenarios.json"
]
},
"metric_config": {
"evaluation_description": "EM on MedQA",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 1.0
},
"score_details": {
"score": 0.41,
"details": {
"description": "min=0.41, mean=0.41, max=0.41, sum=0.41 (1)",
"tab": "Accuracy",
"MedQA - Observed inference time (s)": {
"description": "min=0.275, mean=0.275, max=0.275, sum=0.275 (1)",
"tab": "Efficiency",
"score": 0.27509861532783886
},
"MedQA - # eval": {
"description": "min=503, mean=503, max=503, sum=503 (1)",
"tab": "General information",
"score": 503.0
},
"MedQA - # train": {
"description": "min=5, mean=5, max=5, sum=5 (1)",
"tab": "General information",
"score": 5.0
},
"MedQA - truncated": {
"description": "min=0, mean=0, max=0, sum=0 (1)",
"tab": "General information",
"score": 0.0
},
"MedQA - # prompt tokens": {
"description": "min=1038.833, mean=1038.833, max=1038.833, sum=1038.833 (1)",
"tab": "General information",
"score": 1038.8330019880716
},
"MedQA - # output tokens": {
"description": "min=1, mean=1, max=1, sum=1 (1)",
"tab": "General information",
"score": 1.0
}
}
},
"generation_config": {
"additional_details": {}
}
},
{
"evaluation_name": "WMT 2014",
"source_data": {
"dataset_name": "WMT 2014",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/lite/benchmark_output/releases/v1.13.0/groups/core_scenarios.json"
]
},
"metric_config": {
"evaluation_description": "BLEU-4 on WMT 2014",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 1.0
},
"score_details": {
"score": 0.038,
"details": {
"description": "min=0.0, mean=0.038, max=0.113, sum=0.189 (5)",
"tab": "Accuracy",
"WMT 2014 - Observed inference time (s)": {
"description": "min=0.427, mean=0.47, max=0.534, sum=2.35 (5)",
"tab": "Efficiency",
"score": 0.47001117224047206
},
"WMT 2014 - # eval": {
"description": "min=503, mean=568.8, max=832, sum=2844 (5)",
"tab": "General information",
"score": 568.8
},
"WMT 2014 - # train": {
"description": "min=1, mean=1, max=1, sum=5 (5)",
"tab": "General information",
"score": 1.0
},
"WMT 2014 - truncated": {
"description": "min=0, mean=0, max=0, sum=0 (5)",
"tab": "General information",
"score": 0.0
},
"WMT 2014 - # prompt tokens": {
"description": "min=136.93, mean=181.692, max=241.656, sum=908.462 (5)",
"tab": "General information",
"score": 181.69235022556967
},
"WMT 2014 - # output tokens": {
"description": "min=1, mean=1, max=1, sum=5 (5)",
"tab": "General information",
"score": 1.0
}
}
},
"generation_config": {
"additional_details": {
"language_pair": [
"cs-en",
"de-en",
"fr-en",
"hi-en",
"ru-en"
]
}
}
}
]
}