{
"average_CPS": 50.061157,
"config": {
"model_name": "microsoft/Phi-3.5-mini-instruct",
"num_fewshot": "5",
"batch_size": 1,
"model": "microsoft/Phi-3.5-mini-instruct",
"base_model": "Phi3ForCausalLM",
"revision": "3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca",
"multimodal": false,
"submitted_time": "2024-08-16 20:48:26+00:00",
"num_params_billion": 3.821079552,
"language": "multilingual"
},
"tasks": {
"admission-test": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 36.6,
"stderr": 0.0216
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 37.0,
"stderr": 0.0216
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 66.4,
"stderr": 0.0211
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 65.2,
"stderr": 0.0213
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 36.8,
"stderr": 0.0216
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 37.0,
"stderr": 0.0216
}
],
"average_accuracy": 46.5,
"best_prompt": 66.4,
"prompt_id": "prompt-3",
"CPS": 53.1864,
"std_accuracy": 14.95526663085617
},
"faq": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 28.43,
"stderr": 0.0226
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 28.43,
"stderr": 0.0226
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 84.78999999999999,
"stderr": 0.018
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 88.03,
"stderr": 0.0162
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 28.93,
"stderr": 0.0227
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 28.93,
"stderr": 0.0227
}
],
"average_accuracy": 47.923333333333325,
"best_prompt": 88.03,
"prompt_id": "prompt-4",
"CPS": 52.72410133333333,
"std_accuracy": 29.83008324940892
},
"hate-speech-detection": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 63.92,
"stderr": 0.014
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 67.52,
"stderr": 0.0128
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 71.38,
"stderr": 0.0118
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 71.39,
"stderr": 0.0114
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 53.97,
"stderr": 0.0161
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 63.68000000000001,
"stderr": 0.0135
}
],
"average_accuracy": 65.30999999999999,
"best_prompt": 71.39,
"prompt_id": "prompt-4",
"CPS": 67.049488,
"std_accuracy": 6.5096850922298835
},
"lexical-substitution": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 23.580000000000002,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 24.14,
"stderr": null
}
],
"average_accuracy": 23.86,
"best_prompt": 24.14,
"prompt_id": "prompt-2",
"CPS": 24.072408,
"std_accuracy": 0.3959797974644657
},
"evalita NER": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 32.213990547746,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 30.785665293262237,
"stderr": null
}
],
"average_accuracy": 31.499827920504117,
"best_prompt": 32.213990547746,
"prompt_id": "prompt-1",
"CPS": 31.983930266510765,
"std_accuracy": 1.00997847318547
},
"relation-extraction": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 36.0,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 36.3,
"stderr": null
}
],
"average_accuracy": 36.15,
"best_prompt": 36.3,
"prompt_id": "prompt-2",
"CPS": 36.24555,
"std_accuracy": 0.21213203435596223
},
"sentiment-analysis": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 72.8,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 72.26,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 70.06,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 69.53,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 74.57000000000001,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 74.03999999999999,
"stderr": null
}
],
"average_accuracy": 72.21,
"best_prompt": 74.57000000000001,
"prompt_id": "prompt-5",
"CPS": 72.810148,
"std_accuracy": 2.0529978080845575
},
"summarization-fanpage": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": 19.89,
"stderr": 0.0071
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": 19.53,
"stderr": 0.007
}
],
"average_accuracy": 19.71,
"best_prompt": 19.89,
"prompt_id": "prompt-1",
"CPS": 19.854198,
"std_accuracy": 0.2545584412271567
},
"text-entailment": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 78.5,
"stderr": 0.0206
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 79.0,
"stderr": 0.0204
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 77.5,
"stderr": 0.0209
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 78.5,
"stderr": 0.0206
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 73.75,
"stderr": 0.022
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 74.25,
"stderr": 0.0219
}
],
"average_accuracy": 76.91666666666667,
"best_prompt": 79.0,
"prompt_id": "prompt-2",
"CPS": 77.35416666666667,
"std_accuracy": 2.3166067138525404
},
"word-in-context": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 61.480000000000004,
"stderr": 0.0243
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 64.62,
"stderr": 0.023
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 62.64999999999999,
"stderr": 0.0254
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 67.63,
"stderr": 0.023
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 59.209999999999994,
"stderr": 0.0266
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 68.25,
"stderr": 0.0214
}
],
"average_accuracy": 63.97333333333333,
"best_prompt": 68.25,
"prompt_id": "prompt-6",
"CPS": 65.331175,
"std_accuracy": 3.542217761045568
},
"MAIA-MC": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
},
"MAIA-GEN": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
}
}
}