{
"average_CPS": 39.848225,
"config": {
"model_name": "CohereForAI/aya-expanse-8b",
"num_fewshot": "None",
"batch_size": 1,
"model": "CohereForAI/aya-expanse-8b",
"base_model": "CohereForCausalLM",
"revision": "554c52e22d0f713bab9d3e360734d25cd15dda16",
"multimodal": false,
"submitted_time": "2024-10-23 06:34:13+00:00",
"num_params_billion": 8.028033024,
"language": "en_fr_de_es_it_pt_ja_ko_zh_ar_el_fa_pl_id_cs_he_hi_nl_ro_ru_tr_uk_vi"
},
"tasks": {
"admission-test": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 27.200000000000003,
"stderr": 0.0199
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 28.799999999999997,
"stderr": 0.0203
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 57.4,
"stderr": 0.0221
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 57.8,
"stderr": 0.0221
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 30.4,
"stderr": 0.0206
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 29.599999999999998,
"stderr": 0.0204
}
],
"average_accuracy": 38.53333333333333,
"best_prompt": 57.8,
"prompt_id": "prompt-4",
"CPS": 46.663866666666664,
"std_accuracy": 14.807385544608023
},
"faq": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 29.68,
"stderr": 0.0228
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 29.93,
"stderr": 0.0229
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 41.4,
"stderr": 0.0246
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 66.33,
"stderr": 0.0236
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 29.93,
"stderr": 0.0229
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 30.919999999999998,
"stderr": 0.0231
}
],
"average_accuracy": 38.03166666666666,
"best_prompt": 66.33,
"prompt_id": "prompt-4",
"CPS": 47.559715499999996,
"std_accuracy": 14.585893756183289
},
"hate-speech-detection": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 59.489999999999995,
"stderr": 0.0131
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 60.92999999999999,
"stderr": 0.0122
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 54.690000000000005,
"stderr": 0.0133
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 62.970000000000006,
"stderr": 0.0112
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 62.92,
"stderr": 0.0114
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 61.89,
"stderr": 0.0115
}
],
"average_accuracy": 60.48166666666666,
"best_prompt": 62.970000000000006,
"prompt_id": "prompt-4",
"CPS": 61.4030965,
"std_accuracy": 3.125651398775408
},
"lexical-substitution": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 7.33,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 15.920000000000002,
"stderr": null
}
],
"average_accuracy": 11.625,
"best_prompt": 15.920000000000002,
"prompt_id": "prompt-2",
"CPS": 15.236236000000002,
"std_accuracy": 6.074047250392445
},
"evalita NER": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 15.216677169171108,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 19.49798473097431,
"stderr": null
}
],
"average_accuracy": 17.357330950072708,
"best_prompt": 19.49798473097431,
"prompt_id": "prompt-2",
"CPS": 19.08060038363109,
"std_accuracy": 3.0273416092962875
},
"relation-extraction": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 5.47,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 9.569999999999999,
"stderr": null
}
],
"average_accuracy": 7.52,
"best_prompt": 9.569999999999999,
"prompt_id": "prompt-2",
"CPS": 9.373814999999999,
"std_accuracy": 2.899137802864844
},
"sentiment-analysis": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 45.09,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 47.010000000000005,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 66.27,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 70.62,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 64.92999999999999,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 67.02,
"stderr": null
}
],
"average_accuracy": 60.156666666666666,
"best_prompt": 70.62,
"prompt_id": "prompt-4",
"CPS": 63.230794,
"std_accuracy": 11.104805566360296
},
"summarization-fanpage": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": 18.48,
"stderr": 0.0073
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": 19.21,
"stderr": 0.0075
}
],
"average_accuracy": 18.845,
"best_prompt": 19.21,
"prompt_id": "prompt-2",
"CPS": 19.1398835,
"std_accuracy": 0.51618795026618
},
"text-entailment": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 75.0,
"stderr": 0.0217
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 65.75,
"stderr": 0.0238
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 64.75,
"stderr": 0.0239
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 60.25,
"stderr": 0.0245
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 52.5,
"stderr": 0.025
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 43.75,
"stderr": 0.0248
}
],
"average_accuracy": 60.333333333333336,
"best_prompt": 75.0,
"prompt_id": "prompt-1",
"CPS": 64.0,
"std_accuracy": 10.957493630692499
},
"word-in-context": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 9.02,
"stderr": 0.0242
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 5.34,
"stderr": 0.0194
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 66.19,
"stderr": 0.0208
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 63.21,
"stderr": 0.0221
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 65.82000000000001,
"stderr": 0.0206
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 66.13,
"stderr": 0.0201
}
],
"average_accuracy": 45.95166666666666,
"best_prompt": 66.19,
"prompt_id": "prompt-3",
"CPS": 52.794247166666665,
"std_accuracy": 30.07527716691347
},
"MAIA-MC": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
},
"MAIA-GEN": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
}
}
}
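Note on the derived fields: across every scored task in this file, "average_accuracy" is the mean of the per-prompt values, "std_accuracy" is their sample standard deviation, and "CPS" equals best_prompt * (1 - (best_prompt - average_accuracy) / 100), i.e. the best prompt's score discounted by its gap to the prompt mean; "average_CPS" is the mean CPS over the tasks with non-null scores (the two MAIA tasks are excluded). The Python sketch below re-derives and checks these fields under those assumptions; the formula is inferred from the values in this file rather than taken from the benchmark's scoring code, and the file path is a placeholder.

import json
import statistics

# Placeholder path: point this at the results file reproduced above.
PATH = "CohereForAI_aya-expanse-8b_None.json"

with open(PATH) as f:
    results = json.load(f)

cps_values = []
for name, task in results["tasks"].items():
    scores = [p["value"] for p in task["prompts"]]
    if any(s is None for s in scores):  # the MAIA tasks carry null scores
        continue
    avg = sum(scores) / len(scores)
    best = max(scores)
    # Inferred scoring rule: best prompt, discounted by its distance from the mean.
    cps = best * (1 - (best - avg) / 100)
    std = statistics.stdev(scores)  # sample standard deviation (ddof=1)
    assert abs(avg - task["average_accuracy"]) < 1e-6
    assert abs(best - task["best_prompt"]) < 1e-6
    assert abs(cps - task["CPS"]) < 1e-6
    assert abs(std - task["std_accuracy"]) < 1e-6
    cps_values.append(cps)

# "average_CPS" is the mean over the tasks that actually produced scores.
assert abs(sum(cps_values) / len(cps_values) - results["average_CPS"]) < 1e-5
print(f"all derived fields verified; average_CPS = {sum(cps_values) / len(cps_values):.6f}")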