| [ |
| { |
| "title": "Accuracy", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "The Pile - BPB", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\nBits/byte: Average number of bits per byte according to model probabilities.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "BPB", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "TwitterAAE - BPB", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\nBits/byte: Average number of bits per byte according to model probabilities.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "BPB", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "ICE - BPB", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\nBits/byte: Average number of bits per byte according to model probabilities.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "BPB", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "BLiMP - EM", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - F1", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nF1: Average F1 score in terms of word overlap between the model output and correct reference.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "F1", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "HellaSwag - EM", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "OpenbookQA - EM", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - EM", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "MMLU - EM", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "WikiFact - EM", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - EM", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - F1", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nF1 (set match): Average F1 score in terms of set overlap between the model predicted set and correct reference set.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "F1", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "bAbI - EM", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "Dyck - EM", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\nExact match (final): Fraction of instances that the predicted output matches a correct reference exactly, ignoring text preceding the specified indicator (e.g., space).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "GSM8K - EM", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\nExact match (final): Fraction of instances that the predicted output matches a correct reference exactly, ignoring text preceding the specified indicator (e.g., space).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "MATH - Equivalent", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nEquivalent: Fraction of model outputs that are mathematically equivalent to the correct reference.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "Equivalent", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - Equivalent (chain of thought)", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nEquivalent (chain of thought): Fraction of model outputs that are mathematically equivalent to the correct reference when using chain-of-thought prompting.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "Equivalent (chain of thought)", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - pass@1", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\npass@1: Fraction of model outputs that pass the associated test cases.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "pass@1", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "LSAT - EM", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LegalSupport - EM", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "Data imputation - EM", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Entity matching - EM", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "BBQ - EM", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "BBQ" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 0.27487719298245616, |
| "description": "min=0.16, mean=0.275, max=0.42, sum=1.374 (5)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| }, |
| { |
| "value": 0.0075921845770110154, |
| "description": "min=0, mean=0.008, max=0.034, sum=0.076 (10)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.05631067961165048, |
| "description": "min=0.006, mean=0.056, max=0.12, sum=0.169 (3)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "synthetic_reasoning:mode=induction,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=pattern_match,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=variable_substitution,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 0.2052980132450331, |
| "description": "min=0.205, mean=0.205, max=0.205, sum=0.205 (1)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "babi_qa:task=all,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.352, |
| "description": "min=0.352, mean=0.352, max=0.352, sum=0.352 (1)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "dyck_language_np=3:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.002, |
| "description": "min=0.002, mean=0.002, max=0.002, sum=0.002 (1)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "gsm:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 0.47443762781186094, |
| "description": "min=0.474, mean=0.474, max=0.474, sum=0.474 (1)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "legal_support,method=multiple_choice_joint:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.33282647584973163, |
| "description": "min=0.035, mean=0.333, max=0.631, sum=0.666 (2)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_data_imputation:dataset=Buy,model=EleutherAI_pythia-2.8b", |
| "entity_data_imputation:dataset=Restaurant,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.2102538336045975, |
| "description": "min=0.154, mean=0.21, max=0.248, sum=0.631 (3)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_matching:dataset=Abt_Buy,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Beer,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Dirty_iTunes_Amazon,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_accuracy.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_accuracy.json" |
| } |
| ], |
| "name": "accuracy" |
| }, |
| { |
| "title": "General information", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "The Pile - # eval", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "The Pile - # train", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "The Pile - truncated", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "The Pile - # prompt tokens", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "The Pile - # output tokens", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "The Pile - # trials", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "TwitterAAE - # eval", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "TwitterAAE - # train", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "TwitterAAE - truncated", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "TwitterAAE - # prompt tokens", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "TwitterAAE - # output tokens", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "TwitterAAE - # trials", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "ICE - # eval", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "ICE - # train", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "ICE - truncated", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "ICE - # prompt tokens", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "ICE - # output tokens", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "ICE - # trials", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "BLiMP - # eval", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "BLiMP - # train", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "BLiMP - truncated", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "BLiMP - # prompt tokens", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "BLiMP - # output tokens", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "BLiMP - # trials", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - # eval", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - # train", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - truncated", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - # prompt tokens", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - # output tokens", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - # trials", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "HellaSwag - # eval", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "HellaSwag - # train", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "HellaSwag - truncated", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "HellaSwag - # prompt tokens", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "HellaSwag - # output tokens", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "HellaSwag - # trials", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "OpenbookQA - # eval", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "OpenbookQA - # train", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "OpenbookQA - truncated", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "OpenbookQA - # prompt tokens", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "OpenbookQA - # output tokens", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "OpenbookQA - # trials", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - # eval", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - # train", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - truncated", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - # prompt tokens", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - # output tokens", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - # trials", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "MMLU - # eval", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "MMLU - # train", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "MMLU - truncated", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "MMLU - # prompt tokens", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "MMLU - # output tokens", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "MMLU - # trials", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "WikiFact - # eval", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "WikiFact - # train", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "WikiFact - truncated", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "WikiFact - # prompt tokens", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "WikiFact - # output tokens", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "WikiFact - # trials", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - # eval", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - # train", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - truncated", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - # prompt tokens", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - # output tokens", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - # trials", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - # eval", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - # train", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - truncated", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - # prompt tokens", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - # output tokens", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - # trials", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "bAbI - # eval", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "bAbI - # train", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "bAbI - truncated", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "bAbI - # prompt tokens", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "bAbI - # output tokens", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "bAbI - # trials", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "Dyck - # eval", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "Dyck - # train", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "Dyck - truncated", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "Dyck - # prompt tokens", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "Dyck - # output tokens", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "Dyck - # trials", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "GSM8K - # eval", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "GSM8K - # train", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "GSM8K - truncated", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "GSM8K - # prompt tokens", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "GSM8K - # output tokens", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "GSM8K - # trials", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "MATH - # eval", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH - # train", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH - truncated", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH - # prompt tokens", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH - # output tokens", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH - # trials", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - # eval", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - # train", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - truncated", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - # prompt tokens", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - # output tokens", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - # trials", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - # eval", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - # train", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - truncated", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - # prompt tokens", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - # output tokens", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - # trials", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - # eval", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - # train", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - truncated", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - # prompt tokens", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - # output tokens", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - # trials", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "LSAT - # eval", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LSAT - # train", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LSAT - truncated", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LSAT - # prompt tokens", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LSAT - # output tokens", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LSAT - # trials", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LegalSupport - # eval", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "LegalSupport - # train", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "LegalSupport - truncated", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "LegalSupport - # prompt tokens", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "LegalSupport - # output tokens", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "LegalSupport - # trials", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "Data imputation - # eval", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Data imputation - # train", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Data imputation - truncated", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Data imputation - # prompt tokens", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Data imputation - # output tokens", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Data imputation - # trials", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Entity matching - # eval", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "Entity matching - # train", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "Entity matching - truncated", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "Entity matching - # prompt tokens", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "Entity matching - # output tokens", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "Entity matching - # trials", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "Copyright (text) - # eval", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - # train", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - truncated", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - # prompt tokens", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - # output tokens", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - # trials", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - # eval", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - # train", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - truncated", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - # prompt tokens", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - # output tokens", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - # trials", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - # eval", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - # train", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - truncated", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - # prompt tokens", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - # output tokens", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - # trials", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - # eval", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - # train", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - truncated", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - # prompt tokens", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - # output tokens", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - # trials", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "BBQ - # eval", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BBQ - # train", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BBQ - truncated", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BBQ - # prompt tokens", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BBQ - # output tokens", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BBQ - # trials", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BOLD - # eval", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - # train", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - truncated", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - # prompt tokens", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - # output tokens", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - # trials", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - # eval", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - # train", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - truncated", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - # prompt tokens", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - # output tokens", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - # trials", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "Synthetic efficiency - # eval", |
| "description": "Scenario introduced in this work to better understand inference runtime performance of various models.\n\n# eval: Number of evaluation instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# eval", |
| "run_group": "Synthetic efficiency" |
| } |
| }, |
| { |
| "value": "Synthetic efficiency - # train", |
| "description": "Scenario introduced in this work to better understand inference runtime performance of various models.\n\n# train: Number of training instances (e.g., in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "# train", |
| "run_group": "Synthetic efficiency" |
| } |
| }, |
| { |
| "value": "Synthetic efficiency - truncated", |
| "description": "Scenario introduced in this work to better understand inference runtime performance of various models.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).", |
| "markdown": false, |
| "metadata": { |
| "metric": "truncated", |
| "run_group": "Synthetic efficiency" |
| } |
| }, |
| { |
| "value": "Synthetic efficiency - # prompt tokens", |
| "description": "Scenario introduced in this work to better understand inference runtime performance of various models.\n\n# prompt tokens: Number of tokens in the prompt.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# prompt tokens", |
| "run_group": "Synthetic efficiency" |
| } |
| }, |
| { |
| "value": "Synthetic efficiency - # output tokens", |
| "description": "Scenario introduced in this work to better understand inference runtime performance of various models.\n\n# output tokens: Actual number of output tokens.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# output tokens", |
| "run_group": "Synthetic efficiency" |
| } |
| }, |
| { |
| "value": "Synthetic efficiency - # trials", |
| "description": "Scenario introduced in this work to better understand inference runtime performance of various models.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.", |
| "markdown": false, |
| "metadata": { |
| "metric": "# trials", |
| "run_group": "Synthetic efficiency" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 102.8, |
| "description": "min=100, mean=102.8, max=114, sum=514 (5)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| }, |
| { |
| "value": 5.0, |
| "description": "min=5, mean=5, max=5, sum=25 (5)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| }, |
| { |
| "value": 0.0, |
| "description": "min=0, mean=0, max=0, sum=0 (5)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| }, |
| { |
| "value": 467.935649122807, |
| "description": "min=358.76, mean=467.936, max=612.798, sum=2339.678 (5)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=5 (5)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=5 (5)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| }, |
| { |
| "value": 746.2, |
| "description": "min=96, mean=746.2, max=850, sum=7462 (10)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 5.0, |
| "description": "min=5, mean=5, max=5, sum=50 (10)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.0, |
| "description": "min=0, mean=0, max=0, sum=0 (10)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 83.9422429501836, |
| "description": "min=65.855, mean=83.942, max=128.292, sum=839.422 (10)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 4.5395219202522, |
| "description": "min=3.02, mean=4.54, max=6.332, sum=45.395 (10)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=10 (10)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 515.0, |
| "description": "min=515, mean=515, max=515, sum=1545 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "synthetic_reasoning:mode=induction,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=pattern_match,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=variable_substitution,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 5.0, |
| "description": "min=5, mean=5, max=5, sum=15 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "synthetic_reasoning:mode=induction,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=pattern_match,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=variable_substitution,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.0, |
| "description": "min=0, mean=0, max=0, sum=0 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "synthetic_reasoning:mode=induction,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=pattern_match,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=variable_substitution,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 210.77216828478961, |
| "description": "min=170.79, mean=210.772, max=234.728, sum=632.317 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "synthetic_reasoning:mode=induction,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=pattern_match,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=variable_substitution,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 8.937864077669902, |
| "description": "min=6, mean=8.938, max=11.14, sum=26.814 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "synthetic_reasoning:mode=induction,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=pattern_match,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=variable_substitution,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=3 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "synthetic_reasoning:mode=induction,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=pattern_match,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=variable_substitution,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 906.0, |
| "description": "min=906, mean=906, max=906, sum=906 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "babi_qa:task=all,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 5.0, |
| "description": "min=5, mean=5, max=5, sum=5 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "babi_qa:task=all,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.0, |
| "description": "min=0, mean=0, max=0, sum=0 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "babi_qa:task=all,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 434.6710816777042, |
| "description": "min=434.671, mean=434.671, max=434.671, sum=434.671 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "babi_qa:task=all,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1.1070640176600441, |
| "description": "min=1.107, mean=1.107, max=1.107, sum=1.107 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "babi_qa:task=all,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=1 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "babi_qa:task=all,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 500.0, |
| "description": "min=500, mean=500, max=500, sum=500 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "dyck_language_np=3:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 3.0, |
| "description": "min=3, mean=3, max=3, sum=3 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "dyck_language_np=3:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.0, |
| "description": "min=0, mean=0, max=0, sum=0 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "dyck_language_np=3:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 164.418, |
| "description": "min=164.418, mean=164.418, max=164.418, sum=164.418 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "dyck_language_np=3:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 4.474, |
| "description": "min=4.474, mean=4.474, max=4.474, sum=4.474 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "dyck_language_np=3:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=1 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "dyck_language_np=3:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1000.0, |
| "description": "min=1000, mean=1000, max=1000, sum=1000 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "gsm:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 5.0, |
| "description": "min=5, mean=5, max=5, sum=5 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "gsm:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.0, |
| "description": "min=0, mean=0, max=0, sum=0 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "gsm:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 939.582, |
| "description": "min=939.582, mean=939.582, max=939.582, sum=939.582 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "gsm:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 131.428, |
| "description": "min=131.428, mean=131.428, max=131.428, sum=131.428 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "gsm:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=1 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "gsm:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 489.0, |
| "description": "min=489, mean=489, max=489, sum=489 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "legal_support,method=multiple_choice_joint:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 3.0, |
| "description": "min=3, mean=3, max=3, sum=3 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "legal_support,method=multiple_choice_joint:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.0, |
| "description": "min=0, mean=0, max=0, sum=0 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "legal_support,method=multiple_choice_joint:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 597.3292433537832, |
| "description": "min=597.329, mean=597.329, max=597.329, sum=597.329 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "legal_support,method=multiple_choice_joint:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=1 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "legal_support,method=multiple_choice_joint:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=1 (1)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "legal_support,method=multiple_choice_joint:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 75.5, |
| "description": "min=65, mean=75.5, max=86, sum=151 (2)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_data_imputation:dataset=Buy,model=EleutherAI_pythia-2.8b", |
| "entity_data_imputation:dataset=Restaurant,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 5.0, |
| "description": "min=5, mean=5, max=5, sum=10 (2)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_data_imputation:dataset=Buy,model=EleutherAI_pythia-2.8b", |
| "entity_data_imputation:dataset=Restaurant,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.0, |
| "description": "min=0, mean=0, max=0, sum=0 (2)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_data_imputation:dataset=Buy,model=EleutherAI_pythia-2.8b", |
| "entity_data_imputation:dataset=Restaurant,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 291.5386404293381, |
| "description": "min=256.047, mean=291.539, max=327.031, sum=583.077 (2)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_data_imputation:dataset=Buy,model=EleutherAI_pythia-2.8b", |
| "entity_data_imputation:dataset=Restaurant,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 3.2625223613595704, |
| "description": "min=2.769, mean=3.263, max=3.756, sum=6.525 (2)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_data_imputation:dataset=Buy,model=EleutherAI_pythia-2.8b", |
| "entity_data_imputation:dataset=Restaurant,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=2 (2)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_data_imputation:dataset=Buy,model=EleutherAI_pythia-2.8b", |
| "entity_data_imputation:dataset=Restaurant,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 231.0, |
| "description": "min=91, mean=231, max=493, sum=693 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_matching:dataset=Abt_Buy,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Beer,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Dirty_iTunes_Amazon,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 5.0, |
| "description": "min=5, mean=5, max=5, sum=15 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_matching:dataset=Abt_Buy,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Beer,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Dirty_iTunes_Amazon,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.0, |
| "description": "min=0, mean=0, max=0, sum=0 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_matching:dataset=Abt_Buy,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Beer,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Dirty_iTunes_Amazon,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 930.4999036482186, |
| "description": "min=599.978, mean=930.5, max=1179.477, sum=2791.5 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_matching:dataset=Abt_Buy,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Beer,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Dirty_iTunes_Amazon,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=3 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_matching:dataset=Abt_Buy,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Beer,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Dirty_iTunes_Amazon,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 1.0, |
| "description": "min=1, mean=1, max=1, sum=3 (3)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_matching:dataset=Abt_Buy,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Beer,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Dirty_iTunes_Amazon,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_general_information.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_general_information.json" |
| } |
| ], |
| "name": "general_information" |
| }, |
| { |
| "title": "Calibration", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - ECE (10-bin)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\n10-bin expected calibration error: The average difference between the model's confidence and accuracy, averaged across 10 bins where each bin contains an equal number of points (only computed for classification tasks). Warning - not reliable for small datasets (e.g., with < 300 examples) because each bin will have very few examples.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "ECE (10-bin)", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "HellaSwag - ECE (10-bin)", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\n10-bin expected calibration error: The average difference between the model's confidence and accuracy, averaged across 10 bins where each bin contains an equal number of points (only computed for classification tasks). Warning - not reliable for small datasets (e.g., with < 300 examples) because each bin will have very few examples.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "ECE (10-bin)", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "OpenbookQA - ECE (10-bin)", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\n10-bin expected calibration error: The average difference between the model's confidence and accuracy, averaged across 10 bins where each bin contains an equal number of points (only computed for classification tasks). Warning - not reliable for small datasets (e.g., with < 300 examples) because each bin will have very few examples.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "ECE (10-bin)", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - ECE (10-bin)", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\n10-bin expected calibration error: The average difference between the model's confidence and accuracy, averaged across 10 bins where each bin contains an equal number of points (only computed for classification tasks). Warning - not reliable for small datasets (e.g., with < 300 examples) because each bin will have very few examples.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "ECE (10-bin)", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "MMLU - ECE (10-bin)", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\n10-bin expected calibration error: The average difference between the model's confidence and accuracy, averaged across 10 bins where each bin contains an equal number of points (only computed for classification tasks). Warning - not reliable for small datasets (e.g., with < 300 examples) because each bin will have very few examples.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "ECE (10-bin)", |
| "run_group": "MMLU" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 0.1935229606887982, |
| "description": "min=0.149, mean=0.194, max=0.247, sum=0.968 (5)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_calibration.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_calibration.json" |
| } |
| ], |
| "name": "calibration" |
| }, |
| { |
| "title": "Robustness", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - F1 (Robustness)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nF1: Average F1 score in terms of word overlap between the model output and correct reference.\n- Perturbation Robustness: Computes worst case over different robustness perturbations (misspellings, formatting, contrast sets).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "F1", |
| "run_group": "NaturalQuestions (closed-book)", |
| "perturbation": "Robustness" |
| } |
| }, |
| { |
| "value": "HellaSwag - EM (Robustness)", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.\n- Perturbation Robustness: Computes worst case over different robustness perturbations (misspellings, formatting, contrast sets).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "HellaSwag", |
| "perturbation": "Robustness" |
| } |
| }, |
| { |
| "value": "OpenbookQA - EM (Robustness)", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.\n- Perturbation Robustness: Computes worst case over different robustness perturbations (misspellings, formatting, contrast sets).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "OpenbookQA", |
| "perturbation": "Robustness" |
| } |
| }, |
| { |
| "value": "TruthfulQA - EM (Robustness)", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.\n- Perturbation Robustness: Computes worst case over different robustness perturbations (misspellings, formatting, contrast sets).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "TruthfulQA", |
| "perturbation": "Robustness" |
| } |
| }, |
| { |
| "value": "MMLU - EM (Robustness)", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.\n- Perturbation Robustness: Computes worst case over different robustness perturbations (misspellings, formatting, contrast sets).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "MMLU", |
| "perturbation": "Robustness" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 0.24035087719298245, |
| "description": "min=0.16, mean=0.24, max=0.36, sum=1.202 (5)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_robustness.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_robustness.json" |
| } |
| ], |
| "name": "robustness" |
| }, |
| { |
| "title": "Fairness", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - F1 (Fairness)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nF1: Average F1 score in terms of word overlap between the model output and correct reference.\n- Perturbation Fairness: Computes worst case over different fairness perturbations (changing dialect, race of names, gender).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "F1", |
| "run_group": "NaturalQuestions (closed-book)", |
| "perturbation": "Fairness" |
| } |
| }, |
| { |
| "value": "HellaSwag - EM (Fairness)", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.\n- Perturbation Fairness: Computes worst case over different fairness perturbations (changing dialect, race of names, gender).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "HellaSwag", |
| "perturbation": "Fairness" |
| } |
| }, |
| { |
| "value": "OpenbookQA - EM (Fairness)", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.\n- Perturbation Fairness: Computes worst case over different fairness perturbations (changing dialect, race of names, gender).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "OpenbookQA", |
| "perturbation": "Fairness" |
| } |
| }, |
| { |
| "value": "TruthfulQA - EM (Fairness)", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.\n- Perturbation Fairness: Computes worst case over different fairness perturbations (changing dialect, race of names, gender).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "TruthfulQA", |
| "perturbation": "Fairness" |
| } |
| }, |
| { |
| "value": "MMLU - EM (Fairness)", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\nExact match: Fraction of instances that the predicted output matches a correct reference exactly.\n- Perturbation Fairness: Computes worst case over different fairness perturbations (changing dialect, race of names, gender).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "EM", |
| "run_group": "MMLU", |
| "perturbation": "Fairness" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 0.25635087719298244, |
| "description": "min=0.16, mean=0.256, max=0.41, sum=1.282 (5)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_fairness.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_fairness.json" |
| } |
| ], |
| "name": "fairness" |
| }, |
| { |
| "title": "Bias", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - Stereotypes (race)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nStereotypical associations (race, profession): Measures uneven association of racial groups (Asian, Hispanic, White) with target professions. This measurement is based on cooccurence statistics between the racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (race)", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - Stereotypes (gender)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nStereotypical associations (gender, profession): Measures uneven association of gender groups (male, female) with target professions. This measurement is based on cooccurence statistics between the gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (gender)", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - Representation (race)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nDemographic representation (race): Measures uneven representation of racial groups (Asian, Hispanic, White). This measurement is based on disparities in the frequency statistics across racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (race)", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - Representation (gender)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nDemographic representation (gender): Measures uneven representation of gender groups (male, female). This measurement is based on disparities in the frequency statistics across gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (gender)", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Stereotypes (race)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nStereotypical associations (race, profession): Measures uneven association of racial groups (Asian, Hispanic, White) with target professions. This measurement is based on cooccurence statistics between the racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (race)", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Stereotypes (gender)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nStereotypical associations (gender, profession): Measures uneven association of gender groups (male, female) with target professions. This measurement is based on cooccurence statistics between the gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (gender)", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Representation (race)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nDemographic representation (race): Measures uneven representation of racial groups (Asian, Hispanic, White). This measurement is based on disparities in the frequency statistics across racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (race)", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Representation (gender)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nDemographic representation (gender): Measures uneven representation of gender groups (male, female). This measurement is based on disparities in the frequency statistics across gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (gender)", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Stereotypes (race)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nStereotypical associations (race, profession): Measures uneven association of racial groups (Asian, Hispanic, White) with target professions. This measurement is based on cooccurence statistics between the racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (race)", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Stereotypes (gender)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nStereotypical associations (gender, profession): Measures uneven association of gender groups (male, female) with target professions. This measurement is based on cooccurence statistics between the gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (gender)", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Representation (race)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nDemographic representation (race): Measures uneven representation of racial groups (Asian, Hispanic, White). This measurement is based on disparities in the frequency statistics across racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (race)", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Representation (gender)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nDemographic representation (gender): Measures uneven representation of gender groups (male, female). This measurement is based on disparities in the frequency statistics across gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (gender)", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - Stereotypes (race)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nStereotypical associations (race, profession): Measures uneven association of racial groups (Asian, Hispanic, White) with target professions. This measurement is based on cooccurence statistics between the racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (race)", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - Stereotypes (gender)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nStereotypical associations (gender, profession): Measures uneven association of gender groups (male, female) with target professions. This measurement is based on cooccurence statistics between the gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (gender)", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - Representation (race)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nDemographic representation (race): Measures uneven representation of racial groups (Asian, Hispanic, White). This measurement is based on disparities in the frequency statistics across racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (race)", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - Representation (gender)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nDemographic representation (gender): Measures uneven representation of gender groups (male, female). This measurement is based on disparities in the frequency statistics across gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (gender)", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Stereotypes (race)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nStereotypical associations (race, profession): Measures uneven association of racial groups (Asian, Hispanic, White) with target professions. This measurement is based on cooccurence statistics between the racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (race)", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Stereotypes (gender)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nStereotypical associations (gender, profession): Measures uneven association of gender groups (male, female) with target professions. This measurement is based on cooccurence statistics between the gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (gender)", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Representation (race)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nDemographic representation (race): Measures uneven representation of racial groups (Asian, Hispanic, White). This measurement is based on disparities in the frequency statistics across racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (race)", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Representation (gender)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nDemographic representation (gender): Measures uneven representation of gender groups (male, female). This measurement is based on disparities in the frequency statistics across gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (gender)", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "BOLD - Stereotypes (race)", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\nStereotypical associations (race, profession): Measures uneven association of racial groups (Asian, Hispanic, White) with target professions. This measurement is based on cooccurence statistics between the racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (race)", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - Stereotypes (gender)", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\nStereotypical associations (gender, profession): Measures uneven association of gender groups (male, female) with target professions. This measurement is based on cooccurence statistics between the gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (gender)", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - Representation (race)", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\nDemographic representation (race): Measures uneven representation of racial groups (Asian, Hispanic, White). This measurement is based on disparities in the frequency statistics across racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (race)", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - Representation (gender)", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\nDemographic representation (gender): Measures uneven representation of gender groups (male, female). This measurement is based on disparities in the frequency statistics across gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (gender)", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - Stereotypes (race)", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\nStereotypical associations (race, profession): Measures uneven association of racial groups (Asian, Hispanic, White) with target professions. This measurement is based on cooccurence statistics between the racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (race)", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - Stereotypes (gender)", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\nStereotypical associations (gender, profession): Measures uneven association of gender groups (male, female) with target professions. This measurement is based on cooccurence statistics between the gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Stereotypes (gender)", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - Representation (race)", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\nDemographic representation (race): Measures uneven representation of racial groups (Asian, Hispanic, White). This measurement is based on disparities in the frequency statistics across racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (race)", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - Representation (gender)", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\nDemographic representation (gender): Measures uneven representation of gender groups (male, female). This measurement is based on disparities in the frequency statistics across gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Representation (gender)", |
| "run_group": "RealToxicityPrompts" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_bias.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_bias.json" |
| } |
| ], |
| "name": "bias" |
| }, |
| { |
| "title": "Toxicity", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - Toxic fraction", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nToxic fraction: Fraction of model outputs that are toxic (based on the PerspectiveAPI toxicity classifier).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Toxic fraction", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Toxic fraction", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nToxic fraction: Fraction of model outputs that are toxic (based on the PerspectiveAPI toxicity classifier).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Toxic fraction", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Toxic fraction", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nToxic fraction: Fraction of model outputs that are toxic (based on the PerspectiveAPI toxicity classifier).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Toxic fraction", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - Toxic fraction", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nToxic fraction: Fraction of model outputs that are toxic (based on the PerspectiveAPI toxicity classifier).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Toxic fraction", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Toxic fraction", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nToxic fraction: Fraction of model outputs that are toxic (based on the PerspectiveAPI toxicity classifier).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Toxic fraction", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "BOLD - Toxic fraction", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\nToxic fraction: Fraction of model outputs that are toxic (based on the PerspectiveAPI toxicity classifier).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Toxic fraction", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - Toxic fraction", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\nToxic fraction: Fraction of model outputs that are toxic (based on the PerspectiveAPI toxicity classifier).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Toxic fraction", |
| "run_group": "RealToxicityPrompts" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_toxicity.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_toxicity.json" |
| } |
| ], |
| "name": "toxicity" |
| }, |
| { |
| "title": "APPS metrics", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "APPS (Code) - Avg. # tests passed", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\nAvg. # tests passed: Average number of tests passed by model outputs.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "Avg. # tests passed", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - Strict correctness", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\nStrict correctness: Fraction of models outputs that pass all associated test cases.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "Strict correctness", |
| "run_group": "APPS (Code)" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_apps_metrics.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_apps_metrics.json" |
| } |
| ], |
| "name": "apps_metrics" |
| }, |
| { |
| "title": "Copyright metrics", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Copyright (text) - LCS", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nLongest common prefix length: Average length of longest common prefix between model generation and reference.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "LCS", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Edit dist.", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nEdit distance (Levenshtein): Average Levenshtein edit distance between model generation and reference.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "Edit dist.", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Edit sim.", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nEdit similarity (Levenshtein): Average Levenshtein edit similarity (1 - distance normalized by length of longer sequence) between model generation and reference.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Edit sim.", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - LCS", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nLongest common prefix length: Average length of longest common prefix between model generation and reference.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "LCS", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Edit dist.", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nEdit distance (Levenshtein): Average Levenshtein edit distance between model generation and reference.", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": { |
| "metric": "Edit dist.", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Edit sim.", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nEdit similarity (Levenshtein): Average Levenshtein edit similarity (1 - distance normalized by length of longer sequence) between model generation and reference.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Edit sim.", |
| "run_group": "Copyright (code)" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_copyright_metrics.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_copyright_metrics.json" |
| } |
| ], |
| "name": "copyright_metrics" |
| }, |
| { |
| "title": "Disinformation metrics", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Disinformation (reiteration) - Self-BLEU", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nSelf-BLEU: Self-BLEU metric of [Zhu et al. (2018)](https://arxiv.org/pdf/1802.01886.pdf) as a measure of diversity in model generations.", |
| "markdown": false, |
| "metadata": { |
| "metric": "Self-BLEU", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - Entropy", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nEntropy (Monte Carlo): Monte Carlo estimate of the entropy as a measure of diversity in model generations.", |
| "markdown": false, |
| "metadata": { |
| "metric": "Entropy", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Self-BLEU", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nSelf-BLEU: Self-BLEU metric of [Zhu et al. (2018)](https://arxiv.org/pdf/1802.01886.pdf) as a measure of diversity in model generations.", |
| "markdown": false, |
| "metadata": { |
| "metric": "Self-BLEU", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Entropy", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nEntropy (Monte Carlo): Monte Carlo estimate of the entropy as a measure of diversity in model generations.", |
| "markdown": false, |
| "metadata": { |
| "metric": "Entropy", |
| "run_group": "Disinformation (wedging)" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_disinformation_metrics.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_disinformation_metrics.json" |
| } |
| ], |
| "name": "disinformation_metrics" |
| }, |
| { |
| "title": "BBQ metrics", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "BBQ - BBQ (ambiguous)", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\nBBQ (ambiguous): Metric of [Parrish et al. (2022)](https://aclanthology.org/2022.findings-acl.165/) for BBQ on ambiguous examples.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "BBQ (ambiguous)", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BBQ - BBQ (unambiguous)", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\nBBQ (unambiguous): Metric of [Parrish et al. (2022)](https://aclanthology.org/2022.findings-acl.165/) for BBQ on unambiguous examples.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "BBQ (unambiguous)", |
| "run_group": "BBQ" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_bbq_metrics.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_bbq_metrics.json" |
| } |
| ], |
| "name": "bbq_metrics" |
| }, |
| { |
| "title": "Efficiency (Detailed)", |
| "header": [ |
| { |
| "value": "Model", |
| "markdown": false, |
| "metadata": {} |
| }, |
| { |
| "value": "Mean win rate", |
| "description": "How many models this model outperforms on average (over columns).", |
| "markdown": false, |
| "lower_is_better": false, |
| "metadata": {} |
| }, |
| { |
| "value": "The Pile - Observed inference time (s)", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "The Pile - Idealized inference time (s)", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "The Pile - Denoised inference time (s)", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "The Pile - Training emissions (kg CO2)", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "The Pile - Training energy (MWh)", |
| "description": "The Pile corpus for measuring lanugage model performance across various domains [(Gao et al., 2020)](https://arxiv.org/pdf/2101.00027.pdf).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "The Pile" |
| } |
| }, |
| { |
| "value": "TwitterAAE - Observed inference time (s)", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "TwitterAAE - Idealized inference time (s)", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "TwitterAAE - Denoised inference time (s)", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "TwitterAAE - Training emissions (kg CO2)", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "TwitterAAE - Training energy (MWh)", |
| "description": "The TwitterAAE corpus of [Blodgett et al. (2016)](https://aclanthology.org/D16-1120/) for measuring language model performance in tweets as a function of speaker dialect.\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "TwitterAAE" |
| } |
| }, |
| { |
| "value": "ICE - Observed inference time (s)", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "ICE - Idealized inference time (s)", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "ICE - Denoised inference time (s)", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "ICE - Training emissions (kg CO2)", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "ICE - Training energy (MWh)", |
| "description": "The International Corpus of English (ICE) drawn from English speakers from various places in the world, initiated by [Greenbaum (1991)](https://www.cambridge.org/core/journals/english-today/article/abs/ice-the-international-corpus-of-english/47808205394C538393C3FD8E62E5E701).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "ICE" |
| } |
| }, |
| { |
| "value": "BLiMP - Observed inference time (s)", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "BLiMP - Idealized inference time (s)", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "BLiMP - Denoised inference time (s)", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "BLiMP - Training emissions (kg CO2)", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "BLiMP - Training energy (MWh)", |
| "description": "The Benchmark of Linguistic Minimal Pairs for English (BLiMP) for measuring performance on linguistic phenomena using minimal pair design [(Warstadt et al., 2020)](https://aclanthology.org/2020.tacl-1.25/).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "BLiMP" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - Observed inference time (s)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - Idealized inference time (s)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - Denoised inference time (s)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - Training emissions (kg CO2)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "NaturalQuestions (closed-book) - Training energy (MWh)", |
| "description": "The NaturalQuestions [(Kwiatkowski et al., 2019)](https://aclanthology.org/Q19-1026/) benchmark for question answering based on naturally-occurring queries through Google Search. The input does not include the Wikipedia page with the answer.\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "NaturalQuestions (closed-book)" |
| } |
| }, |
| { |
| "value": "HellaSwag - Observed inference time (s)", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "HellaSwag - Idealized inference time (s)", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "HellaSwag - Denoised inference time (s)", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "HellaSwag - Training emissions (kg CO2)", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "HellaSwag - Training energy (MWh)", |
| "description": "The HellaSwag benchmark for commonsense reasoning in question answering [(Zellers et al., 2019)](https://aclanthology.org/P19-1472/).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "HellaSwag" |
| } |
| }, |
| { |
| "value": "OpenbookQA - Observed inference time (s)", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "OpenbookQA - Idealized inference time (s)", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "OpenbookQA - Denoised inference time (s)", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "OpenbookQA - Training emissions (kg CO2)", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "OpenbookQA - Training energy (MWh)", |
| "description": "The OpenbookQA benchmark for commonsense-intensive open book question answering [(Mihaylov et al., 2018)](https://aclanthology.org/D18-1260/).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "OpenbookQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - Observed inference time (s)", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - Idealized inference time (s)", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - Denoised inference time (s)", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - Training emissions (kg CO2)", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "TruthfulQA - Training energy (MWh)", |
| "description": "The TruthfulQA benchmarking for measuring model truthfulness and commonsense knowledge in question answering [(Lin et al., 2022)](https://aclanthology.org/2022.acl-long.229/).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "TruthfulQA" |
| } |
| }, |
| { |
| "value": "MMLU - Observed inference time (s)", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "MMLU - Idealized inference time (s)", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "MMLU - Denoised inference time (s)", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "MMLU - Training emissions (kg CO2)", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "MMLU - Training energy (MWh)", |
| "description": "The Massive Multitask Language Understanding (MMLU) benchmark for knowledge-intensive question answering across 57 domains [(Hendrycks et al., 2021)](https://openreview.net/forum?id=d7KBjmI3GmQ).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "MMLU" |
| } |
| }, |
| { |
| "value": "WikiFact - Observed inference time (s)", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "WikiFact - Idealized inference time (s)", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "WikiFact - Denoised inference time (s)", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "WikiFact - Training emissions (kg CO2)", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "WikiFact - Training energy (MWh)", |
| "description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "WikiFact" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - Observed inference time (s)", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - Idealized inference time (s)", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - Denoised inference time (s)", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - Training emissions (kg CO2)", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (abstract symbols) - Training energy (MWh)", |
| "description": "Synthetic reasoning tasks defined using abstract symbols based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "Synthetic reasoning (abstract symbols)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - Observed inference time (s)", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - Idealized inference time (s)", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - Denoised inference time (s)", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - Training emissions (kg CO2)", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "Synthetic reasoning (natural language) - Training energy (MWh)", |
| "description": "Synthetic reasoning tasks defined using simple natural language based on LIME [(Wu et al., 2021)](https://proceedings.mlr.press/v139/wu21c.html).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "Synthetic reasoning (natural language)" |
| } |
| }, |
| { |
| "value": "bAbI - Observed inference time (s)", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "bAbI - Idealized inference time (s)", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "bAbI - Denoised inference time (s)", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "bAbI - Training emissions (kg CO2)", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "bAbI - Training energy (MWh)", |
| "description": "The bAbI benchmark for measuring understanding and reasoning [(Weston et al., 2015)](https://arxiv.org/pdf/1502.05698.pdf).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "bAbI" |
| } |
| }, |
| { |
| "value": "Dyck - Observed inference time (s)", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "Dyck - Idealized inference time (s)", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "Dyck - Denoised inference time (s)", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "Dyck - Training emissions (kg CO2)", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "Dyck - Training energy (MWh)", |
| "description": "Scenario testing hierarchical reasoning through the Dyck formal languages [(Suzgun et al., 2019)](https://aclanthology.org/W19-3905/).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "Dyck" |
| } |
| }, |
| { |
| "value": "GSM8K - Observed inference time (s)", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "GSM8K - Idealized inference time (s)", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "GSM8K - Denoised inference time (s)", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "GSM8K - Training emissions (kg CO2)", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "GSM8K - Training energy (MWh)", |
| "description": "The grade school math word problems dataset (GSM8K) for testing mathematical reasoning on grade-school math problems [(Cobbe et al., 2021)](https://arxiv.org/pdf/2110.14168.pdf).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "GSM8K" |
| } |
| }, |
| { |
| "value": "MATH - Observed inference time (s)", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH - Idealized inference time (s)", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH - Denoised inference time (s)", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH - Training emissions (kg CO2)", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH - Training energy (MWh)", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "MATH" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - Observed inference time (s)", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - Idealized inference time (s)", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - Denoised inference time (s)", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - Training emissions (kg CO2)", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "MATH (chain-of-thought) - Training energy (MWh)", |
| "description": "The MATH benchmark for measuring mathematical problem solving on competition math problems with chain-of-thought style reasoning [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/be83ab3ecd0db773eb2dc1b0a17836a1-Abstract-round2.html).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "MATH (chain-of-thought)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - Observed inference time (s)", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - Idealized inference time (s)", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - Denoised inference time (s)", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - Training emissions (kg CO2)", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "APPS (Code) - Training energy (MWh)", |
| "description": "The APPS benchmark for measuring competence on code challenges [(Hendrycks et al., 2021)](https://datasets-benchmarks-proceedings.neurips.cc/paper/2021/hash/c24cd76e1ce41366a4bbe8a49b02a028-Abstract-round2.html).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "APPS (Code)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - Observed inference time (s)", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - Idealized inference time (s)", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - Denoised inference time (s)", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - Training emissions (kg CO2)", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "HumanEval (Code) - Training energy (MWh)", |
| "description": "The HumanEval benchmark for measuring functional correctness for synthesizing programs from docstrings [(Chen et al., 2021)](https://arxiv.org/pdf/2107.03374.pdf).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "HumanEval (Code)" |
| } |
| }, |
| { |
| "value": "LSAT - Observed inference time (s)", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LSAT - Idealized inference time (s)", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LSAT - Denoised inference time (s)", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LSAT - Training emissions (kg CO2)", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LSAT - Training energy (MWh)", |
| "description": "The LSAT benchmark for measuring analytical reasoning on the Law School Admission Test (LSAT; [Zhong et al., 2021](https://arxiv.org/pdf/2104.06598.pdf)).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "LSAT" |
| } |
| }, |
| { |
| "value": "LegalSupport - Observed inference time (s)", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "LegalSupport - Idealized inference time (s)", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "LegalSupport - Denoised inference time (s)", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "LegalSupport - Training emissions (kg CO2)", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "LegalSupport - Training energy (MWh)", |
| "description": "Scenario introduced in this work to measure fine-grained legal reasoning through reverse entailment.\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "LegalSupport" |
| } |
| }, |
| { |
| "value": "Data imputation - Observed inference time (s)", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Data imputation - Idealized inference time (s)", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Data imputation - Denoised inference time (s)", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Data imputation - Training emissions (kg CO2)", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Data imputation - Training energy (MWh)", |
| "description": "Scenario from [Mei et al. (2021)](https://ieeexplore.ieee.org/document/9458712/) that tests the ability to impute missing entities in a data table.\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "Data imputation" |
| } |
| }, |
| { |
| "value": "Entity matching - Observed inference time (s)", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "Entity matching - Idealized inference time (s)", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "Entity matching - Denoised inference time (s)", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "Entity matching - Training emissions (kg CO2)", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "Entity matching - Training energy (MWh)", |
| "description": "Scenario from Magellan [(Konda et al., 2016)](https://dl.acm.org/doi/10.14778/3007263.3007314) that tests the ability to determine if two entities match.\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "Entity matching" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Observed inference time (s)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Idealized inference time (s)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Denoised inference time (s)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Training emissions (kg CO2)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (text) - Training energy (MWh)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for books, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "Copyright (text)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Observed inference time (s)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Idealized inference time (s)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Denoised inference time (s)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Training emissions (kg CO2)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Copyright (code) - Training energy (MWh)", |
| "description": "Scenario introduced in this work to measure copyright and memorization behavior for code, based off of [Carlini et al. (2021)](https://www.usenix.org/biblio-11958).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "Copyright (code)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - Observed inference time (s)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - Idealized inference time (s)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - Denoised inference time (s)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - Training emissions (kg CO2)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (reiteration) - Training energy (MWh)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to reiterate disinformation content.\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "Disinformation (reiteration)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Observed inference time (s)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Idealized inference time (s)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Denoised inference time (s)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Training emissions (kg CO2)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "Disinformation (wedging) - Training energy (MWh)", |
| "description": "Scenario from [Buchanan et al. (2021)](https://cset.georgetown.edu/publication/truth-lies-and-automation/) that tests the ability to generate divisive and wedging content.\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "Disinformation (wedging)" |
| } |
| }, |
| { |
| "value": "BBQ - Observed inference time (s)", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BBQ - Idealized inference time (s)", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BBQ - Denoised inference time (s)", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BBQ - Training emissions (kg CO2)", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BBQ - Training energy (MWh)", |
| "description": "The Bias Benchmark for Question Answering (BBQ) for measuring social bias in question answering in ambiguous and unambigous context [(Parrish et al., 2022)](https://aclanthology.org/2022.findings-acl.165/).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "BBQ" |
| } |
| }, |
| { |
| "value": "BOLD - Observed inference time (s)", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - Idealized inference time (s)", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - Denoised inference time (s)", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - Training emissions (kg CO2)", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "BOLD - Training energy (MWh)", |
| "description": "The Bias in Open-Ended Language Generation Dataset (BOLD) for measuring biases and toxicity in open-ended language generation [(Dhamala et al., 2021)](https://dl.acm.org/doi/10.1145/3442188.3445924).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "BOLD" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - Observed inference time (s)", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\nObserved inference runtime (s): Average observed time to process a request to the model (via an API, and thus depends on particular deployment).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Observed inference time (s)", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - Idealized inference time (s)", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\nIdealized inference runtime (s): Average time to process a request to the model based solely on the model architecture (using Megatron-LM).", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Idealized inference time (s)", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - Denoised inference time (s)", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Denoised inference time (s)", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - Training emissions (kg CO2)", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\nEstimated training emissions (kg CO2): Estimate of the CO2 emissions from training the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training emissions (kg CO2)", |
| "run_group": "RealToxicityPrompts" |
| } |
| }, |
| { |
| "value": "RealToxicityPrompts - Training energy (MWh)", |
| "description": "The RealToxicityPrompts dataset for measuring toxicity in prompted model generations [(Gehman et al., 2020)](https://aclanthology.org/2020.findings-emnlp.301/).\n\nEstimated training energy cost (MWh): Estimate of the amount of energy used to train the model.", |
| "markdown": false, |
| "lower_is_better": true, |
| "metadata": { |
| "metric": "Training energy (MWh)", |
| "run_group": "RealToxicityPrompts" |
| } |
| } |
| ], |
| "rows": [ |
| [ |
| { |
| "value": "EleutherAI/pythia-2.8b", |
| "description": "", |
| "markdown": false |
| }, |
| { |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 0.2254588010771233, |
| "description": "min=0.177, mean=0.225, max=0.295, sum=1.127 (5)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| }, |
| { |
| "description": "5 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "5 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "mmlu:subject=abstract_algebra,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=college_chemistry,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=computer_security,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=econometrics,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical", |
| "mmlu:subject=us_foreign_policy,method=multiple_choice_joint,model=EleutherAI_pythia-2.8b,data_augmentation=canonical" |
| ] |
| }, |
| { |
| "value": 0.1377072874255742, |
| "description": "min=0.118, mean=0.138, max=0.161, sum=1.377 (10)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "10 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "10 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b", |
| "wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.2355702184164794, |
| "description": "min=0.181, mean=0.236, max=0.271, sum=0.707 (3)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "synthetic_reasoning:mode=induction,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=pattern_match,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=variable_substitution,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "3 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "3 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "synthetic_reasoning:mode=induction,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=pattern_match,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=variable_substitution,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "synthetic_reasoning:mode=induction,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=pattern_match,model=EleutherAI_pythia-2.8b", |
| "synthetic_reasoning:mode=variable_substitution,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 0.23959948684995538, |
| "description": "min=0.24, mean=0.24, max=0.24, sum=0.24 (1)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "babi_qa:task=all,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "1 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "1 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "babi_qa:task=all,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "babi_qa:task=all,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.15079361009597778, |
| "description": "min=0.151, mean=0.151, max=0.151, sum=0.151 (1)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "dyck_language_np=3:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "1 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "1 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "dyck_language_np=3:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "dyck_language_np=3:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 2.084434862613678, |
| "description": "min=2.084, mean=2.084, max=2.084, sum=2.084 (1)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "gsm:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "1 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "1 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "gsm:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "gsm:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "value": 0.27627374448171665, |
| "description": "min=0.276, mean=0.276, max=0.276, sum=0.276 (1)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "legal_support,method=multiple_choice_joint:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "1 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "1 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "legal_support,method=multiple_choice_joint:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "legal_support,method=multiple_choice_joint:model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.19507627996860977, |
| "description": "min=0.183, mean=0.195, max=0.207, sum=0.39 (2)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_data_imputation:dataset=Buy,model=EleutherAI_pythia-2.8b", |
| "entity_data_imputation:dataset=Restaurant,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "2 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "2 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_data_imputation:dataset=Buy,model=EleutherAI_pythia-2.8b", |
| "entity_data_imputation:dataset=Restaurant,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_data_imputation:dataset=Buy,model=EleutherAI_pythia-2.8b", |
| "entity_data_imputation:dataset=Restaurant,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "value": 0.4729115394940713, |
| "description": "min=0.308, mean=0.473, max=0.604, sum=1.419 (3)", |
| "style": { |
| "font-weight": "bold" |
| }, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_matching:dataset=Abt_Buy,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Beer,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Dirty_iTunes_Amazon,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "3 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "3 matching runs, but no matching metrics", |
| "markdown": false |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_matching:dataset=Abt_Buy,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Beer,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Dirty_iTunes_Amazon,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "(0)", |
| "style": {}, |
| "markdown": false, |
| "run_spec_names": [ |
| "entity_matching:dataset=Abt_Buy,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Beer,model=EleutherAI_pythia-2.8b", |
| "entity_matching:dataset=Dirty_iTunes_Amazon,model=EleutherAI_pythia-2.8b" |
| ] |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| }, |
| { |
| "description": "No matching runs", |
| "markdown": false |
| } |
| ] |
| ], |
| "links": [ |
| { |
| "text": "LaTeX", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/targeted_evaluations_efficiency_detailed.tex" |
| }, |
| { |
| "text": "JSON", |
| "href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/targeted_evaluations_efficiency_detailed.json" |
| } |
| ], |
| "name": "efficiency_detailed" |
| } |
| ] |