[
{
"title": "WikiFact",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
},
{
"value": "Denoised inference time (s)",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "WikiFact"
}
},
{
"value": "# eval",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "WikiFact"
}
},
{
"value": "# train",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "WikiFact"
}
},
{
"value": "truncated",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "WikiFact"
}
},
{
"value": "# prompt tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "WikiFact"
}
},
{
"value": "# output tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "WikiFact"
}
},
{
"value": "# trials",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"markdown": false
},
{
"value": 0.0075921845770110154,
"description": "min=0, mean=0.008, max=0.034, sum=0.076 (10)",
"style": {
"font-weight": "bold"
},
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b"
]
},
{
"description": "10 matching runs, but no matching metrics",
"markdown": false
},
{
"value": 746.2,
"description": "min=96, mean=746.2, max=850, sum=7462 (10)",
"style": {},
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=50 (10)",
"style": {},
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (10)",
"style": {},
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 83.9422429501836,
"description": "min=65.855, mean=83.942, max=128.292, sum=839.422 (10)",
"style": {},
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 4.5395219202522,
"description": "min=3.02, mean=4.54, max=6.332, sum=45.395 (10)",
"style": {},
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=10 (10)",
"style": {},
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b",
"wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b"
]
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/wikifact_wikifact.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/wikifact_wikifact.json"
}
],
"name": "wikifact"
},
{
"title": "subject: author",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
},
{
"value": "Denoised inference time (s)",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "WikiFact"
}
},
{
"value": "# eval",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "WikiFact"
}
},
{
"value": "# train",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "WikiFact"
}
},
{
"value": "truncated",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "WikiFact"
}
},
{
"value": "# prompt tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "WikiFact"
}
},
{
"value": "# output tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "WikiFact"
}
},
{
"value": "# trials",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"href": "?group=wikifact&subgroup=subject%3A%20author&runSpecs=%5B%22wikifact%3Ak%3D5%2Csubject%3Dauthor%2Cmodel%3DEleutherAI_pythia-2.8b%22%5D",
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=author,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {
"font-weight": "bold"
},
"markdown": false
},
{
"description": "1 matching runs, but no matching metrics",
"markdown": false
},
{
"value": 850.0,
"description": "min=850, mean=850, max=850, sum=850 (1)",
"style": {},
"markdown": false
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=5 (1)",
"style": {},
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {},
"markdown": false
},
{
"value": 77.12235294117647,
"description": "min=77.122, mean=77.122, max=77.122, sum=77.122 (1)",
"style": {},
"markdown": false
},
{
"value": 4.3494117647058825,
"description": "min=4.349, mean=4.349, max=4.349, sum=4.349 (1)",
"style": {},
"markdown": false
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=1 (1)",
"style": {},
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/wikifact_wikifact_subject:author.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/wikifact_wikifact_subject:author.json"
}
],
"name": "wikifact_subject:author"
},
{
"title": "subject: currency",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
},
{
"value": "Denoised inference time (s)",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "WikiFact"
}
},
{
"value": "# eval",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "WikiFact"
}
},
{
"value": "# train",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "WikiFact"
}
},
{
"value": "truncated",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "WikiFact"
}
},
{
"value": "# prompt tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "WikiFact"
}
},
{
"value": "# output tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "WikiFact"
}
},
{
"value": "# trials",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"href": "?group=wikifact&subgroup=subject%3A%20currency&runSpecs=%5B%22wikifact%3Ak%3D5%2Csubject%3Dcurrency%2Cmodel%3DEleutherAI_pythia-2.8b%22%5D",
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=currency,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 0.010588235294117647,
"description": "min=0.011, mean=0.011, max=0.011, sum=0.011 (1)",
"style": {
"font-weight": "bold"
},
"markdown": false
},
{
"description": "1 matching runs, but no matching metrics",
"markdown": false
},
{
"value": 850.0,
"description": "min=850, mean=850, max=850, sum=850 (1)",
"style": {},
"markdown": false
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=5 (1)",
"style": {},
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {},
"markdown": false
},
{
"value": 67.89294117647059,
"description": "min=67.893, mean=67.893, max=67.893, sum=67.893 (1)",
"style": {},
"markdown": false
},
{
"value": 3.6458823529411766,
"description": "min=3.646, mean=3.646, max=3.646, sum=3.646 (1)",
"style": {},
"markdown": false
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=1 (1)",
"style": {},
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/wikifact_wikifact_subject:currency.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/wikifact_wikifact_subject:currency.json"
}
],
"name": "wikifact_subject:currency"
},
{
"title": "subject: discoverer_or_inventor",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
},
{
"value": "Denoised inference time (s)",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "WikiFact"
}
},
{
"value": "# eval",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "WikiFact"
}
},
{
"value": "# train",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "WikiFact"
}
},
{
"value": "truncated",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "WikiFact"
}
},
{
"value": "# prompt tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "WikiFact"
}
},
{
"value": "# output tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "WikiFact"
}
},
{
"value": "# trials",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"href": "?group=wikifact&subgroup=subject%3A%20discoverer_or_inventor&runSpecs=%5B%22wikifact%3Ak%3D5%2Csubject%3Ddiscoverer_or_inventor%2Cmodel%3DEleutherAI_pythia-2.8b%22%5D",
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=discoverer_or_inventor,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {
"font-weight": "bold"
},
"markdown": false
},
{
"description": "1 matching runs, but no matching metrics",
"markdown": false
},
{
"value": 850.0,
"description": "min=850, mean=850, max=850, sum=850 (1)",
"style": {},
"markdown": false
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=5 (1)",
"style": {},
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {},
"markdown": false
},
{
"value": 87.16117647058823,
"description": "min=87.161, mean=87.161, max=87.161, sum=87.161 (1)",
"style": {},
"markdown": false
},
{
"value": 5.28,
"description": "min=5.28, mean=5.28, max=5.28, sum=5.28 (1)",
"style": {},
"markdown": false
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=1 (1)",
"style": {},
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/wikifact_wikifact_subject:discoverer_or_inventor.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/wikifact_wikifact_subject:discoverer_or_inventor.json"
}
],
"name": "wikifact_subject:discoverer_or_inventor"
},
{
"title": "subject: instance_of",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
},
{
"value": "Denoised inference time (s)",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "WikiFact"
}
},
{
"value": "# eval",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "WikiFact"
}
},
{
"value": "# train",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "WikiFact"
}
},
{
"value": "truncated",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "WikiFact"
}
},
{
"value": "# prompt tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "WikiFact"
}
},
{
"value": "# output tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "WikiFact"
}
},
{
"value": "# trials",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"href": "?group=wikifact&subgroup=subject%3A%20instance_of&runSpecs=%5B%22wikifact%3Ak%3D5%2Csubject%3Dinstance_of%2Cmodel%3DEleutherAI_pythia-2.8b%22%5D",
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=instance_of,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 0.009411764705882352,
"description": "min=0.009, mean=0.009, max=0.009, sum=0.009 (1)",
"style": {
"font-weight": "bold"
},
"markdown": false
},
{
"description": "1 matching runs, but no matching metrics",
"markdown": false
},
{
"value": 850.0,
"description": "min=850, mean=850, max=850, sum=850 (1)",
"style": {},
"markdown": false
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=5 (1)",
"style": {},
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {},
"markdown": false
},
{
"value": 72.06117647058824,
"description": "min=72.061, mean=72.061, max=72.061, sum=72.061 (1)",
"style": {},
"markdown": false
},
{
"value": 4.012941176470588,
"description": "min=4.013, mean=4.013, max=4.013, sum=4.013 (1)",
"style": {},
"markdown": false
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=1 (1)",
"style": {},
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/wikifact_wikifact_subject:instance_of.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/wikifact_wikifact_subject:instance_of.json"
}
],
"name": "wikifact_subject:instance_of"
},
{
"title": "subject: medical_condition_treated",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
},
{
"value": "Denoised inference time (s)",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "WikiFact"
}
},
{
"value": "# eval",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "WikiFact"
}
},
{
"value": "# train",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "WikiFact"
}
},
{
"value": "truncated",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "WikiFact"
}
},
{
"value": "# prompt tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "WikiFact"
}
},
{
"value": "# output tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "WikiFact"
}
},
{
"value": "# trials",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"href": "?group=wikifact&subgroup=subject%3A%20medical_condition_treated&runSpecs=%5B%22wikifact%3Ak%3D5%2Csubject%3Dmedical_condition_treated%2Cmodel%3DEleutherAI_pythia-2.8b%22%5D",
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=medical_condition_treated,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 0.0058823529411764705,
"description": "min=0.006, mean=0.006, max=0.006, sum=0.006 (1)",
"style": {
"font-weight": "bold"
},
"markdown": false
},
{
"description": "1 matching runs, but no matching metrics",
"markdown": false
},
{
"value": 850.0,
"description": "min=850, mean=850, max=850, sum=850 (1)",
"style": {},
"markdown": false
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=5 (1)",
"style": {},
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {},
"markdown": false
},
{
"value": 85.21058823529411,
"description": "min=85.211, mean=85.211, max=85.211, sum=85.211 (1)",
"style": {},
"markdown": false
},
{
"value": 3.02,
"description": "min=3.02, mean=3.02, max=3.02, sum=3.02 (1)",
"style": {},
"markdown": false
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=1 (1)",
"style": {},
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/wikifact_wikifact_subject:medical_condition_treated.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/wikifact_wikifact_subject:medical_condition_treated.json"
}
],
"name": "wikifact_subject:medical_condition_treated"
},
{
"title": "subject: part_of",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
},
{
"value": "Denoised inference time (s)",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "WikiFact"
}
},
{
"value": "# eval",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "WikiFact"
}
},
{
"value": "# train",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "WikiFact"
}
},
{
"value": "truncated",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "WikiFact"
}
},
{
"value": "# prompt tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "WikiFact"
}
},
{
"value": "# output tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "WikiFact"
}
},
{
"value": "# trials",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"href": "?group=wikifact&subgroup=subject%3A%20part_of&runSpecs=%5B%22wikifact%3Ak%3D5%2Csubject%3Dpart_of%2Cmodel%3DEleutherAI_pythia-2.8b%22%5D",
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=part_of,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {
"font-weight": "bold"
},
"markdown": false
},
{
"description": "1 matching runs, but no matching metrics",
"markdown": false
},
{
"value": 850.0,
"description": "min=850, mean=850, max=850, sum=850 (1)",
"style": {},
"markdown": false
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=5 (1)",
"style": {},
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {},
"markdown": false
},
{
"value": 96.1329411764706,
"description": "min=96.133, mean=96.133, max=96.133, sum=96.133 (1)",
"style": {},
"markdown": false
},
{
"value": 5.878823529411765,
"description": "min=5.879, mean=5.879, max=5.879, sum=5.879 (1)",
"style": {},
"markdown": false
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=1 (1)",
"style": {},
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/wikifact_wikifact_subject:part_of.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/wikifact_wikifact_subject:part_of.json"
}
],
"name": "wikifact_subject:part_of"
},
{
"title": "subject: place_of_birth",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
},
{
"value": "Denoised inference time (s)",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "WikiFact"
}
},
{
"value": "# eval",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "WikiFact"
}
},
{
"value": "# train",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "WikiFact"
}
},
{
"value": "truncated",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "WikiFact"
}
},
{
"value": "# prompt tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "WikiFact"
}
},
{
"value": "# output tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "WikiFact"
}
},
{
"value": "# trials",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"href": "?group=wikifact&subgroup=subject%3A%20place_of_birth&runSpecs=%5B%22wikifact%3Ak%3D5%2Csubject%3Dplace_of_birth%2Cmodel%3DEleutherAI_pythia-2.8b%22%5D",
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=place_of_birth,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 0.004705882352941176,
"description": "min=0.005, mean=0.005, max=0.005, sum=0.005 (1)",
"style": {
"font-weight": "bold"
},
"markdown": false
},
{
"description": "1 matching runs, but no matching metrics",
"markdown": false
},
{
"value": 850.0,
"description": "min=850, mean=850, max=850, sum=850 (1)",
"style": {},
"markdown": false
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=5 (1)",
"style": {},
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {},
"markdown": false
},
{
"value": 65.85529411764706,
"description": "min=65.855, mean=65.855, max=65.855, sum=65.855 (1)",
"style": {},
"markdown": false
},
{
"value": 3.1588235294117646,
"description": "min=3.159, mean=3.159, max=3.159, sum=3.159 (1)",
"style": {},
"markdown": false
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=1 (1)",
"style": {},
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/wikifact_wikifact_subject:place_of_birth.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/wikifact_wikifact_subject:place_of_birth.json"
}
],
"name": "wikifact_subject:place_of_birth"
},
{
"title": "subject: plaintiff",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
},
{
"value": "Denoised inference time (s)",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "WikiFact"
}
},
{
"value": "# eval",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "WikiFact"
}
},
{
"value": "# train",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "WikiFact"
}
},
{
"value": "truncated",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "WikiFact"
}
},
{
"value": "# prompt tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "WikiFact"
}
},
{
"value": "# output tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "WikiFact"
}
},
{
"value": "# trials",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"href": "?group=wikifact&subgroup=subject%3A%20plaintiff&runSpecs=%5B%22wikifact%3Ak%3D5%2Csubject%3Dplaintiff%2Cmodel%3DEleutherAI_pythia-2.8b%22%5D",
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=plaintiff,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {
"font-weight": "bold"
},
"markdown": false
},
{
"description": "1 matching runs, but no matching metrics",
"markdown": false
},
{
"value": 96.0,
"description": "min=96, mean=96, max=96, sum=96 (1)",
"style": {},
"markdown": false
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=5 (1)",
"style": {},
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {},
"markdown": false
},
{
"value": 128.29166666666666,
"description": "min=128.292, mean=128.292, max=128.292, sum=128.292 (1)",
"style": {},
"markdown": false
},
{
"value": 5.885416666666667,
"description": "min=5.885, mean=5.885, max=5.885, sum=5.885 (1)",
"style": {},
"markdown": false
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=1 (1)",
"style": {},
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/wikifact_wikifact_subject:plaintiff.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/wikifact_wikifact_subject:plaintiff.json"
}
],
"name": "wikifact_subject:plaintiff"
},
{
"title": "subject: position_held",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
},
{
"value": "Denoised inference time (s)",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "WikiFact"
}
},
{
"value": "# eval",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "WikiFact"
}
},
{
"value": "# train",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "WikiFact"
}
},
{
"value": "truncated",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "WikiFact"
}
},
{
"value": "# prompt tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "WikiFact"
}
},
{
"value": "# output tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "WikiFact"
}
},
{
"value": "# trials",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"href": "?group=wikifact&subgroup=subject%3A%20position_held&runSpecs=%5B%22wikifact%3Ak%3D5%2Csubject%3Dposition_held%2Cmodel%3DEleutherAI_pythia-2.8b%22%5D",
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=position_held,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 0.011764705882352941,
"description": "min=0.012, mean=0.012, max=0.012, sum=0.012 (1)",
"style": {
"font-weight": "bold"
},
"markdown": false
},
{
"description": "1 matching runs, but no matching metrics",
"markdown": false
},
{
"value": 850.0,
"description": "min=850, mean=850, max=850, sum=850 (1)",
"style": {},
"markdown": false
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=5 (1)",
"style": {},
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {},
"markdown": false
},
{
"value": 89.04588235294118,
"description": "min=89.046, mean=89.046, max=89.046, sum=89.046 (1)",
"style": {},
"markdown": false
},
{
"value": 6.331764705882353,
"description": "min=6.332, mean=6.332, max=6.332, sum=6.332 (1)",
"style": {},
"markdown": false
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=1 (1)",
"style": {},
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/wikifact_wikifact_subject:position_held.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/wikifact_wikifact_subject:position_held.json"
}
],
"name": "wikifact_subject:position_held"
},
{
"title": "subject: symptoms_and_signs",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "EM",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "WikiFact"
}
},
{
"value": "Denoised inference time (s)",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "WikiFact"
}
},
{
"value": "# eval",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "WikiFact"
}
},
{
"value": "# train",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "WikiFact"
}
},
{
"value": "truncated",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "WikiFact"
}
},
{
"value": "# prompt tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "WikiFact"
}
},
{
"value": "# output tokens",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "WikiFact"
}
},
{
"value": "# trials",
"description": "Scenario introduced in this work, inspired by [Petroni et al. (2019)](https://aclanthology.org/D19-1250/), to more extensively test factual knowledge.\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "WikiFact"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"href": "?group=wikifact&subgroup=subject%3A%20symptoms_and_signs&runSpecs=%5B%22wikifact%3Ak%3D5%2Csubject%3Dsymptoms_and_signs%2Cmodel%3DEleutherAI_pythia-2.8b%22%5D",
"markdown": false,
"run_spec_names": [
"wikifact:k=5,subject=symptoms_and_signs,model=EleutherAI_pythia-2.8b"
]
},
{
"value": 0.03356890459363958,
"description": "min=0.034, mean=0.034, max=0.034, sum=0.034 (1)",
"style": {
"font-weight": "bold"
},
"markdown": false
},
{
"description": "1 matching runs, but no matching metrics",
"markdown": false
},
{
"value": 566.0,
"description": "min=566, mean=566, max=566, sum=566 (1)",
"style": {},
"markdown": false
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=5 (1)",
"style": {},
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (1)",
"style": {},
"markdown": false
},
{
"value": 70.64840989399293,
"description": "min=70.648, mean=70.648, max=70.648, sum=70.648 (1)",
"style": {},
"markdown": false
},
{
"value": 3.8321554770318023,
"description": "min=3.832, mean=3.832, max=3.832, sum=3.832 (1)",
"style": {},
"markdown": false
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=1 (1)",
"style": {},
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/latex/wikifact_wikifact_subject:symptoms_and_signs.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2000/groups/json/wikifact_wikifact_subject:symptoms_and_signs.json"
}
],
"name": "wikifact_subject:symptoms_and_signs"
}
]