{
"average_CPS": 42.560957,
"config": {
"model_name": "Fastweb/FastwebMIIA-7B",
"num_fewshot": "5",
"batch_size": 1,
"model": "Fastweb/FastwebMIIA-7B",
"base_model": "MistralForCausalLM",
"revision": "b2f5d54180b117f9b15fad8e6306e10b2e7f6324",
"multimodal": false,
"submitted_time": "2025-05-30 08:51:45+00:00",
"num_params_billion": 7.391399936,
"language": "it_en"
},
"tasks": {
"admission-test": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 39.800000000000004,
"stderr": 0.0219
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 38.4,
"stderr": 0.0218
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 55.800000000000004,
"stderr": 0.0222
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 54.6,
"stderr": 0.0223
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 40.0,
"stderr": 0.0219
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 38.2,
"stderr": 0.0218
}
],
"average_accuracy": 44.46666666666667,
"best_prompt": 55.800000000000004,
"prompt_id": "prompt-3",
"CPS": 49.476,
"std_accuracy": 8.35384143174065
},
"faq": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 27.93,
"stderr": 0.0224
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 26.93,
"stderr": 0.0222
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 65.84,
"stderr": 0.0237
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 63.59,
"stderr": 0.0241
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 28.68,
"stderr": 0.0226
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 27.43,
"stderr": 0.0223
}
],
"average_accuracy": 40.06666666666667,
"best_prompt": 65.84,
"prompt_id": "prompt-3",
"CPS": 48.870837333333334,
"std_accuracy": 19.11451874012701
},
"hate-speech-detection": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 46.03,
"stderr": 0.0158
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 50.629999999999995,
"stderr": 0.015
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 62.31,
"stderr": 0.0113
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 62.150000000000006,
"stderr": 0.0114
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 38.31,
"stderr": 0.0173
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 38.75,
"stderr": 0.0175
}
],
"average_accuracy": 49.696666666666665,
"best_prompt": 62.31,
"prompt_id": "prompt-3",
"CPS": 54.450632,
"std_accuracy": 10.751585309463284
},
"lexical-substitution": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 11.940000000000001,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 11.04,
"stderr": null
}
],
"average_accuracy": 11.49,
"best_prompt": 11.940000000000001,
"prompt_id": "prompt-1",
"CPS": 11.886270000000001,
"std_accuracy": 0.6363961030678943
},
"evalita NER": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 29.239402569074162,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 27.625167232186136,
"stderr": null
}
],
"average_accuracy": 28.43228490063015,
"best_prompt": 29.239402569074162,
"prompt_id": "prompt-1",
"CPS": 29.003406184791693,
"std_accuracy": 1.141436753144474
},
"relation-extraction": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 29.93,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 26.38,
"stderr": null
}
],
"average_accuracy": 28.155,
"best_prompt": 29.93,
"prompt_id": "prompt-1",
"CPS": 29.3987425,
"std_accuracy": 2.510229073212244
},
"sentiment-analysis": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 55.38999999999999,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 52.400000000000006,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 62.72,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 62.419999999999995,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 53.03,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 51.129999999999995,
"stderr": null
}
],
"average_accuracy": 56.181666666666665,
"best_prompt": 62.72,
"prompt_id": "prompt-3",
"CPS": 58.619157333333334,
"std_accuracy": 5.139001524291139
},
"summarization-fanpage": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": 27.47,
"stderr": 0.0014
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": 28.199999999999996,
"stderr": 0.0014
}
],
"average_accuracy": 27.834999999999997,
"best_prompt": 28.199999999999996,
"prompt_id": "prompt-2",
"CPS": 28.09707,
"std_accuracy": 0.5161879502661775
},
"text-entailment": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 58.25,
"stderr": 0.0247
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 63.5,
"stderr": 0.0241
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 58.25,
"stderr": 0.0247
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 55.75,
"stderr": 0.0249
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 53.5,
"stderr": 0.025
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 51.5,
"stderr": 0.025
}
],
"average_accuracy": 56.791666666666664,
"best_prompt": 63.5,
"prompt_id": "prompt-2",
"CPS": 59.24020833333333,
"std_accuracy": 4.223199813727343
},
"word-in-context": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 56.120000000000005,
"stderr": 0.0259
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 43.2,
"stderr": 0.0289
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 64.81,
"stderr": 0.0207
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 63.56,
"stderr": 0.0211
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 46.36,
"stderr": 0.029
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 38.5,
"stderr": 0.032
}
],
"average_accuracy": 52.09166666666667,
"best_prompt": 64.81,
"prompt_id": "prompt-3",
"CPS": 56.56724816666667,
"std_accuracy": 11.010479402217992
},
"MAIA-MC": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
},
"MAIA-GEN": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
}
}
}