{
"average_CPS": 34.706864,
"config": {
"model_name": "Fastweb/FastwebMIIA-7B",
"num_fewshot": "None",
"batch_size": 1,
"model": "Fastweb/FastwebMIIA-7B",
"base_model": "MistralForCausalLM",
"revision": "b2f5d54180b117f9b15fad8e6306e10b2e7f6324",
"multimodal": false,
"submitted_time": "2025-05-30 08:51:45+00:00",
"num_params_billion": 7.391399936,
"language": "it_en"
},
"tasks": {
"admission-test": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 26.400000000000002,
"stderr": 0.0197
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 24.0,
"stderr": 0.0191
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 41.8,
"stderr": 0.0221
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 45.4,
"stderr": 0.0223
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 30.4,
"stderr": 0.0206
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 30.4,
"stderr": 0.0206
}
],
"average_accuracy": 33.06666666666667,
"best_prompt": 45.4,
"prompt_id": "prompt-4",
"CPS": 39.800666666666665,
"std_accuracy": 8.59364105991556
},
"faq": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 27.43,
"stderr": 0.0223
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 28.18,
"stderr": 0.0225
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 34.910000000000004,
"stderr": 0.0238
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 46.129999999999995,
"stderr": 0.0249
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 28.68,
"stderr": 0.0226
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 28.93,
"stderr": 0.0227
}
],
"average_accuracy": 32.37666666666667,
"best_prompt": 46.129999999999995,
"prompt_id": "prompt-4",
"CPS": 39.78558733333333,
"std_accuracy": 7.2553198872735205
},
"hate-speech-detection": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 60.99,
"stderr": 0.0116
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 60.919999999999995,
"stderr": 0.0117
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 5.27,
"stderr": 0.0108
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 56.2,
"stderr": 0.0136
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 60.62,
"stderr": 0.0116
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 34.160000000000004,
"stderr": 0.0164
}
],
"average_accuracy": 46.36000000000001,
"best_prompt": 60.99,
"prompt_id": "prompt-1",
"CPS": 52.067163,
"std_accuracy": 22.642578475076554
},
"lexical-substitution": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 0.0,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 0.0,
"stderr": null
}
],
"average_accuracy": 0.0,
"best_prompt": 0.0,
"prompt_id": "prompt-1",
"CPS": 0.0,
"std_accuracy": 0.0
},
"evalita NER": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 9.340104217159476,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 9.29838584585555,
"stderr": null
}
],
"average_accuracy": 9.319245031507513,
"best_prompt": 9.340104217159476,
"prompt_id": "prompt-1",
"CPS": 9.338155947480733,
"std_accuracy": 0.02949934324906417
},
"relation-extraction": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 4.2,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 0.62,
"stderr": null
}
],
"average_accuracy": 2.41,
"best_prompt": 4.2,
"prompt_id": "prompt-1",
"CPS": 4.12482,
"std_accuracy": 2.53144227664784
},
"sentiment-analysis": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 46.98,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 42.76,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 62.9,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 52.03,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 51.57000000000001,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 26.939999999999998,
"stderr": null
}
],
"average_accuracy": 47.196666666666665,
"best_prompt": 62.9,
"prompt_id": "prompt-3",
"CPS": 53.02260333333333,
"std_accuracy": 11.992875662937005
},
"summarization-fanpage": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": 23.46,
"stderr": 0.0017
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": 30.2,
"stderr": 0.0013
}
],
"average_accuracy": 26.83,
"best_prompt": 30.2,
"prompt_id": "prompt-2",
"CPS": 29.182259999999996,
"std_accuracy": 4.765899705197329
},
"text-entailment": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 55.25,
"stderr": 0.0249
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 55.00000000000001,
"stderr": 0.0249
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 58.25,
"stderr": 0.0247
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 68.25,
"stderr": 0.0233
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 56.99999999999999,
"stderr": 0.0248
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 52.75,
"stderr": 0.025
}
],
"average_accuracy": 57.75,
"best_prompt": 68.25,
"prompt_id": "prompt-4",
"CPS": 61.08375,
"std_accuracy": 5.474942922076904
},
"word-in-context": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 65.68,
"stderr": 0.0203
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 66.58,
"stderr": 0.0199
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 44.15,
"stderr": 0.0291
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 30.599999999999998,
"stderr": 0.0314
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 62.36000000000001,
"stderr": 0.0213
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 58.77,
"stderr": 0.0236
}
],
"average_accuracy": 54.69,
"best_prompt": 66.58,
"prompt_id": "prompt-2",
"CPS": 58.663638,
"std_accuracy": 14.346060086309413
},
"MAIA-MC": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
},
"MAIA-GEN": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
}
}
}