{
  "title": "Accuracy",
  "header": [
    {
      "value": "Model",
      "markdown": false,
      "metadata": {}
    },
    {
      "value": "Mean win rate",
      "description": "How many models this model outperforms on average (over columns).",
      "markdown": false,
      "lower_is_better": false,
      "metadata": {}
    },
    {
      "value": "NaturalQuestions (closed-book) - F1",
      "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nF1: Average F1 score in terms of word overlap between the model output and correct reference.",
      "markdown": false,
      "lower_is_better": false,
      "metadata": {
        "metric": "F1",
        "run_group": "NaturalQuestions (closed-book)"
      }
    },
    {
      "value": "HellaSwag - EM",
      "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.",
      "markdown": false,
      "lower_is_better": false,
      "metadata": {
        "metric": "EM",
        "run_group": "HellaSwag"
      }
    },
    {
      "value": "OpenbookQA - EM",
      "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.",
      "markdown": false,
      "lower_is_better": false,
      "metadata": {
        "metric": "EM",
        "run_group": "OpenbookQA"
      }
    },
    {
      "value": "TruthfulQA - EM",
      "description": "The TruthfulQA benchmark for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.",
      "markdown": false,
      "lower_is_better": false,
      "metadata": {
        "metric": "EM",
        "run_group": "TruthfulQA"
      }
    },
    {
      "value": "MMLU - EM",
      "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.",
      "markdown": false,
      "lower_is_better": false,
      "metadata": {
        "metric": "EM",
        "run_group": "MMLU"
      }
    },
    {
      "value": "WikiFact - EM",
      "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
      "markdown": false,
      "lower_is_better": false,
      "metadata": {
        "metric": "EM",
        "run_group": "WikiFact"
      }
    }
  ],
  "rows": [
    [
      {
        "value": "EleutherAI/pythia-2.8b",
        "description": "",
        "markdown": false
      },
      {
        "markdown": false
      },
      {
        "description": "No matching runs",
        "markdown": false
      },
      {
        "description": "No matching runs",
        "markdown": false
      },
      {
        "description": "No matching runs",
        "markdown": false
      },
      {
        "description": "No matching runs",
        "markdown": false
      },
      {
        "value": 0.27487719298245616,
        "description": "min=0.16, mean=0.275, max=0.42, sum=1.374 (5)",
        "style": {
          "font-weight": "bold"
        },
        "markdown": false,
        "run_spec_names": [
          "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
          "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
          "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
          "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
          "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
        ]
      },
      {
        "value": 0.0075921845770110154,
        "description": "min=0, mean=0.008, max=0.034, sum=0.076 (10)",
        "style": {
          "font-weight": "bold"
        },
        "markdown": false,
        "run_spec_names": [
          "wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b",
          "wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b",
          "wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b",
          "wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b",
          "wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b",
          "wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b",
          "wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b",
          "wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b",
          "wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b",
          "wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b"
        ]
      }
    ]
  ],
  "links": [
    {
      "text": "LaTeX",
      "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/knowledge_accuracy.tex"
    },
    {
      "text": "JSON",
      "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/knowledge_accuracy.json"
    }
  ],
  "name": "accuracy"
}