[
{
"title": "Accuracy",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "Mean win rate",
"description": "How many models this model outperforms on average (over columns).",
"markdown": false,
"lower_is_better": false,
"metadata": {}
},
{
"value": "CivilComments - EM",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "CivilComments"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"markdown": false
},
{
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (18)",
"style": {
"font-weight": "bold"
},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/latex/toxicity_detection_accuracy.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/json/toxicity_detection_accuracy.json"
}
],
"name": "accuracy"
},
{
"title": "Calibration",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "Mean win rate",
"description": "How many models this model outperforms on average (over columns).",
"markdown": false,
"lower_is_better": false,
"metadata": {}
},
{
"value": "CivilComments - ECE (10-bin)",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\n10-bin expected calibration error: The average difference between the model's confidence and accuracy, averaged across 10 bins where each bin contains an equal number of points (only computed for classification tasks). Warning - not reliable for small datasets (e.g., with < 300 examples) because each bin will have very few examples.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "ECE (10-bin)",
"run_group": "CivilComments"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"markdown": false
},
{
"markdown": false
},
{
"value": 3.3724591072781132e-12,
"description": "min=0.0, mean=0.0, max=0.0, sum=0.0 (18)",
"style": {
"font-weight": "bold"
},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/latex/toxicity_detection_calibration.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/json/toxicity_detection_calibration.json"
}
],
"name": "calibration"
},
{
"title": "Robustness",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "Mean win rate",
"description": "How many models this model outperforms on average (over columns).",
"markdown": false,
"lower_is_better": false,
"metadata": {}
},
{
"value": "CivilComments - EM (Robustness)",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.\n- Perturbation Robustness: Computes worst case over different robustness perturbations (misspellings, formatting, contrast sets).",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "CivilComments",
"perturbation": "Robustness"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"markdown": false
},
{
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (18)",
"style": {
"font-weight": "bold"
},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/latex/toxicity_detection_robustness.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/json/toxicity_detection_robustness.json"
}
],
"name": "robustness"
},
{
"title": "Fairness",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "Mean win rate",
"description": "How many models this model outperforms on average (over columns).",
"markdown": false,
"lower_is_better": false,
"metadata": {}
},
{
"value": "CivilComments - EM (Fairness)",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\nQuasi-exact match: Fraction of instances that the predicted output matches a correct reference up to light processing.\n- Perturbation Fairness: Computes worst case over different fairness perturbations (changing dialect, race of names, gender).",
"markdown": false,
"lower_is_better": false,
"metadata": {
"metric": "EM",
"run_group": "CivilComments",
"perturbation": "Fairness"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"markdown": false
},
{
"markdown": false
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (18)",
"style": {
"font-weight": "bold"
},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/latex/toxicity_detection_fairness.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/json/toxicity_detection_fairness.json"
}
],
"name": "fairness"
},
{
"title": "Bias",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "Mean win rate",
"description": "How many models this model outperforms on average (over columns).",
"markdown": false,
"lower_is_better": false,
"metadata": {}
},
{
"value": "CivilComments - Stereotypes (race)",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\nStereotypical associations (race, profession): Measures uneven association of racial groups (Asian, Hispanic, White) with target professions. This measurement is based on cooccurence statistics between the racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Stereotypes (race)",
"run_group": "CivilComments"
}
},
{
"value": "CivilComments - Stereotypes (gender)",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\nStereotypical associations (gender, profession): Measures uneven association of gender groups (male, female) with target professions. This measurement is based on cooccurence statistics between the gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)) and the target professions (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Stereotypes (gender)",
"run_group": "CivilComments"
}
},
{
"value": "CivilComments - Representation (race)",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\nDemographic representation (race): Measures uneven representation of racial groups (Asian, Hispanic, White). This measurement is based on disparities in the frequency statistics across racially-associated names (word list from [Garg et al., 2018](https://www.pnas.org/doi/10.1073/pnas.1720347115); race associations based on US Census statistics).",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Representation (race)",
"run_group": "CivilComments"
}
},
{
"value": "CivilComments - Representation (gender)",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\nDemographic representation (gender): Measures uneven representation of gender groups (male, female). This measurement is based on disparities in the frequency statistics across gender terms (word list from [Bolukbasi et al., 2016](https://papers.nips.cc/paper/2016/hash/a486cd07e4ac3d270571622f4f316ec5-Abstract.html)).",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Representation (gender)",
"run_group": "CivilComments"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"markdown": false
},
{
"markdown": false
},
{
"description": "(0)",
"style": {},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
},
{
"description": "(0)",
"style": {},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
},
{
"description": "(0)",
"style": {},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
},
{
"description": "(0)",
"style": {},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/latex/toxicity_detection_bias.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/json/toxicity_detection_bias.json"
}
],
"name": "bias"
},
{
"title": "Toxicity",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "Mean win rate",
"description": "How many models this model outperforms on average (over columns).",
"markdown": false,
"lower_is_better": false,
"metadata": {}
},
{
"value": "CivilComments - Toxic fraction",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\nToxic fraction: Fraction of model outputs that are toxic (based on the PerspectiveAPI toxicity classifier).",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Toxic fraction",
"run_group": "CivilComments"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"markdown": false
},
{
"markdown": false
},
{
"description": "9 matching runs, but no matching metrics",
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/latex/toxicity_detection_toxicity.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/json/toxicity_detection_toxicity.json"
}
],
"name": "toxicity"
},
{
"title": "Efficiency",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "Mean win rate",
"description": "How many models this model outperforms on average (over columns).",
"markdown": false,
"lower_is_better": false,
"metadata": {}
},
{
"value": "CivilComments - Denoised inference time (s)",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\nDenoised inference runtime (s): Average time to process a request to the model minus performance contention by using profiled runtimes from multiple trials of SyntheticEfficiencyScenario.",
"markdown": false,
"lower_is_better": true,
"metadata": {
"metric": "Denoised inference time (s)",
"run_group": "CivilComments"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"markdown": false
},
{
"markdown": false
},
{
"description": "9 matching runs, but no matching metrics",
"markdown": false
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/latex/toxicity_detection_efficiency.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/json/toxicity_detection_efficiency.json"
}
],
"name": "efficiency"
},
{
"title": "General information",
"header": [
{
"value": "Model",
"markdown": false,
"metadata": {}
},
{
"value": "Mean win rate",
"description": "How many models this model outperforms on average (over columns).",
"markdown": false,
"lower_is_better": false,
"metadata": {}
},
{
"value": "CivilComments - # eval",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\n# eval: Number of evaluation instances.",
"markdown": false,
"metadata": {
"metric": "# eval",
"run_group": "CivilComments"
}
},
{
"value": "CivilComments - # train",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\n# train: Number of training instances (e.g., in-context examples).",
"markdown": false,
"metadata": {
"metric": "# train",
"run_group": "CivilComments"
}
},
{
"value": "CivilComments - truncated",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\ntruncated: Fraction of instances where the prompt itself was truncated (implies that there were no in-context examples).",
"markdown": false,
"metadata": {
"metric": "truncated",
"run_group": "CivilComments"
}
},
{
"value": "CivilComments - # prompt tokens",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\n# prompt tokens: Number of tokens in the prompt.",
"markdown": false,
"metadata": {
"metric": "# prompt tokens",
"run_group": "CivilComments"
}
},
{
"value": "CivilComments - # output tokens",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\n# output tokens: Actual number of output tokens.",
"markdown": false,
"metadata": {
"metric": "# output tokens",
"run_group": "CivilComments"
}
},
{
"value": "CivilComments - # trials",
"description": "The CivilComments benchmark for toxicity detection [(Borkan et al., 2019)](https://arxiv.org/pdf/1903.04561.pdf).\n\n# trials: Number of trials, where in each trial we choose an independent, random set of training instances.",
"markdown": false,
"metadata": {
"metric": "# trials",
"run_group": "CivilComments"
}
}
],
"rows": [
[
{
"value": "EleutherAI/pythia-2.8b",
"description": "",
"markdown": false
},
{
"markdown": false
},
{
"value": 371.55555555555554,
"description": "min=74, mean=371.556, max=683, sum=6688 (18)",
"style": {},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
},
{
"value": 5.0,
"description": "min=5, mean=5, max=5, sum=90 (18)",
"style": {},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
},
{
"value": 0.0,
"description": "min=0, mean=0, max=0, sum=0 (18)",
"style": {},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
},
{
"value": 771.6539847352628,
"description": "min=360.976, mean=771.654, max=1282.4, sum=13889.772 (18)",
"style": {},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
},
{
"value": 4.999712147380541,
"description": "min=4.995, mean=5.0, max=5, sum=89.995 (18)",
"style": {},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
},
{
"value": 1.0,
"description": "min=1, mean=1, max=1, sum=18 (18)",
"style": {},
"markdown": false,
"run_spec_names": [
"civil_comments:demographic=LGBTQ,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=all,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=black,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=christian,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=female,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=male,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=muslim,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=other_religions,model=EleutherAI_pythia-2.8b,data_augmentation=canonical",
"civil_comments:demographic=white,model=EleutherAI_pythia-2.8b,data_augmentation=canonical"
]
}
]
],
"links": [
{
"text": "LaTeX",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/latex/toxicity_detection_general_information.tex"
},
{
"text": "JSON",
"href": "benchmark_output/runs/classic_pythia-2.8b-step2/groups/json/toxicity_detection_general_information.json"
}
],
"name": "general_information"
}
]