{
"title": "Accuracy",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "Mean win rate",
"description": "How many models this model outperforms on average (over columns).",
"markdown": false,
"lower_is_better": false,
"metadata": {}
},
{
"value": "NaturalQuestions (closed-book) - F1",
"description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nF1: Average F1 score in terms of word overlap between the model output and correct reference.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "F1",
"run_group": "NaturalQuestions (closed-book)"
}
},
{
"value": "HellaSwag - EM",
"description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "HellaSwag"
}
},
{
"value": "OpenbookQA - EM",
"description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "OpenbookQA"
}
},
{
"value": "TruthfulQA - EM",
"description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "TruthfulQA"
}
},
{
"value": "MMLU - EM",
"description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "MMLU"
}
},
{
"value": "WikiFact - EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"markdown": false
},
{
"markdown": false
},
{
"description": "No matching runs",
"markdown": false
},
{
"description": "No matching runs",
"markdown": false
},
{
"description": "No matching runs",
"markdown": false
},
{
"description": "No matching runs",
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (5)",
"style": {
"font-weight": "bold"
},
"markdown": false,
"run_spec_names": [
"mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (10)",
"style": {
"font-weight": "bold"
},
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b"
]
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/latex/knowledge_accuracy.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/json/knowledge_accuracy.json"
}
],
"name": "accuracy"
}