{
"average_CPS": 38.372307,
"config": {
"model_name": "microsoft/phi-4",
"num_fewshot": "None",
"batch_size": 1,
"model": "microsoft/phi-4",
"base_model": "Phi3ForCausalLM",
"revision": "932b33c0ec9ca189badeb22480721a8de9d0e006",
"multimodal": false,
"submitted_time": "2024-12-11 11:47:29+00:00",
"num_params_billion": 14.6595072,
"language": "en"
},
"tasks": {
"admission-test": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 29.599999999999998,
"stderr": 0.0204
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 26.6,
"stderr": 0.0198
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 83.2,
"stderr": 0.0167
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 80.60000000000001,
"stderr": 0.0177
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 40.0,
"stderr": 0.0219
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 40.8,
"stderr": 0.022
}
],
"average_accuracy": 50.13333333333333,
"best_prompt": 83.2,
"prompt_id": "prompt-3",
"CPS": 55.68853333333333,
"std_accuracy": 25.246201034347063
},
"faq": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 31.169999999999998,
"stderr": 0.0232
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 31.169999999999998,
"stderr": 0.0232
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 88.03,
"stderr": 0.0162
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 95.26,
"stderr": 0.0106
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 31.919999999999998,
"stderr": 0.0233
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 31.169999999999998,
"stderr": 0.0232
}
],
"average_accuracy": 51.45333333333334,
"best_prompt": 95.26,
"prompt_id": "prompt-4",
"CPS": 53.52976933333334,
"std_accuracy": 31.217522429985806
},
"hate-speech-detection": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 57.720000000000006,
"stderr": 0.0131
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 61.82,
"stderr": 0.0114
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 62.69,
"stderr": 0.0112
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 63.14999999999999,
"stderr": 0.0112
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 55.14,
"stderr": 0.013
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 61.92999999999999,
"stderr": 0.0113
}
],
"average_accuracy": 60.40833333333333,
"best_prompt": 63.14999999999999,
"prompt_id": "prompt-4",
"CPS": 61.418637499999996,
"std_accuracy": 3.2253211726379503
},
"lexical-substitution": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 0.0,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 0.0,
"stderr": null
}
],
"average_accuracy": 0.0,
"best_prompt": 0.0,
"prompt_id": "prompt-1",
"CPS": 0.0,
"std_accuracy": 0.0
},
"evalita NER": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 0.3643625787687833,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 0.0,
"stderr": null
}
],
"average_accuracy": 0.18218128938439165,
"best_prompt": 0.3643625787687833,
"prompt_id": "prompt-1",
"CPS": 0.3636987783247481,
"std_accuracy": 0.25764325025802426
},
"relation-extraction": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 21.66,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 17.11,
"stderr": null
}
],
"average_accuracy": 19.384999999999998,
"best_prompt": 21.66,
"prompt_id": "prompt-1",
"CPS": 21.167234999999998,
"std_accuracy": 3.217335854398792
},
"sentiment-analysis": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 27.13,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 26.640000000000004,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 69.99,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 74.26,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 29.330000000000002,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 26.52,
"stderr": null
}
],
"average_accuracy": 42.31166666666667,
"best_prompt": 74.26,
"prompt_id": "prompt-4",
"CPS": 50.535167666666666,
"std_accuracy": 23.155002411286134
},
"summarization-fanpage": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": 25.69,
"stderr": 0.0085
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": 0.8699999999999999,
"stderr": 0.0044
}
],
"average_accuracy": 13.280000000000001,
"best_prompt": 25.69,
"prompt_id": "prompt-1",
"CPS": 22.501871,
"std_accuracy": 17.550390309050112
},
"text-entailment": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 59.5,
"stderr": 0.0246
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 56.25,
"stderr": 0.0248
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 66.0,
"stderr": 0.0237
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 73.25,
"stderr": 0.0222
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 52.5,
"stderr": 0.025
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 55.00000000000001,
"stderr": 0.0249
}
],
"average_accuracy": 60.416666666666664,
"best_prompt": 73.25,
"prompt_id": "prompt-4",
"CPS": 63.84958333333333,
"std_accuracy": 7.824108042880456
},
"word-in-context": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 17.03,
"stderr": 0.0284
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 10.18,
"stderr": 0.025
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 69.01,
"stderr": 0.0199
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 61.42999999999999,
"stderr": 0.0272
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 64.96,
"stderr": 0.021
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 66.75999999999999,
"stderr": 0.0199
}
],
"average_accuracy": 48.228333333333325,
"best_prompt": 69.01,
"prompt_id": "prompt-3",
"CPS": 54.66857183333333,
"std_accuracy": 27.02052806047037
},
"MAIA-MC": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
},
"MAIA-GEN": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
}
}
}