{
"average_CPS": 46.54586,
"config": {
"model_name": "OpenGVLab/InternVL3_5-8B",
"num_fewshot": "None",
"batch_size": 1,
"model": "OpenGVLab/InternVL3_5-8B",
"base_model": "InternVLChatModel",
"revision": "9bb6a56ad9cc69db95e2d4eeb15a52bbcac4ef79",
"multimodal": true,
"submitted_time": "2025-08-25 16:38:47+00:00",
"num_params_billion": 8.528318464,
"language": "multilingual"
},
"tasks": {
"admission-test": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 27.0,
"stderr": 0.0199
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 25.4,
"stderr": 0.0195
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 81.8,
"stderr": 0.0173
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 80.4,
"stderr": 0.0178
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 31.2,
"stderr": 0.0207
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 31.8,
"stderr": 0.0208
}
],
"average_accuracy": 46.26666666666666,
"best_prompt": 81.8,
"prompt_id": "prompt-3",
"CPS": 52.733733333333326,
"std_accuracy": 27.094698128354683
},
"faq": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 30.669999999999998,
"stderr": 0.0231
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 30.42,
"stderr": 0.023
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 98.25,
"stderr": 0.0065
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 98.25,
"stderr": 0.0065
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 30.669999999999998,
"stderr": 0.0231
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 30.17,
"stderr": 0.023
}
],
"average_accuracy": 53.07166666666668,
"best_prompt": 98.25,
"prompt_id": "prompt-3",
"CPS": 53.862287500000015,
"std_accuracy": 34.99547766021585
},
"hate-speech-detection": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 29.360000000000003,
"stderr": 0.0174
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 37.85,
"stderr": 0.0166
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 69.3,
"stderr": 0.0116
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 70.64,
"stderr": 0.0113
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 61.67,
"stderr": 0.0118
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 64.52,
"stderr": 0.0122
}
],
"average_accuracy": 55.556666666666665,
"best_prompt": 70.64,
"prompt_id": "prompt-4",
"CPS": 59.98513333333333,
"std_accuracy": 17.515089113865983
},
"lexical-substitution": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 5.87,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 9.59,
"stderr": null
}
],
"average_accuracy": 7.73,
"best_prompt": 9.59,
"prompt_id": "prompt-2",
"CPS": 9.411626,
"std_accuracy": 2.6304372260139566
},
"evalita NER": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 26.926094280174503,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 30.032796897721763,
"stderr": null
}
],
"average_accuracy": 28.47944558894813,
"best_prompt": 30.032796897721763,
"prompt_id": "prompt-2",
"CPS": 29.566282054049676,
"std_accuracy": 2.196770487997665
},
"relation-extraction": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 29.86,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 31.979999999999997,
"stderr": null
}
],
"average_accuracy": 30.919999999999998,
"best_prompt": 31.979999999999997,
"prompt_id": "prompt-2",
"CPS": 31.641012,
"std_accuracy": 1.499066376115479
},
"sentiment-analysis": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 51.080000000000005,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 53.459999999999994,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 70.25,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 72.8,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 60.209999999999994,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 51.300000000000004,
"stderr": null
}
],
"average_accuracy": 59.849999999999994,
"best_prompt": 72.8,
"prompt_id": "prompt-4",
"CPS": 63.37239999999999,
"std_accuracy": 9.662707695051111
},
"summarization-fanpage": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": 28.52,
"stderr": 0.0097
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": 28.33,
"stderr": 0.009
}
],
"average_accuracy": 28.424999999999997,
"best_prompt": 28.52,
"prompt_id": "prompt-1",
"CPS": 28.492905999999998,
"std_accuracy": 0.13435028842544494
},
"text-entailment": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": 80.75,
"stderr": 0.0197
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": 77.25,
"stderr": 0.021
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": 79.75,
"stderr": 0.0201
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": 78.25,
"stderr": 0.0207
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": 56.99999999999999,
"stderr": 0.0248
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": 63.74999999999999,
"stderr": 0.0241
}
],
"average_accuracy": 72.79166666666667,
"best_prompt": 80.75,
"prompt_id": "prompt-1",
"CPS": 74.32364583333333,
"std_accuracy": 9.925241390851244
},
"word-in-context": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "f1",
"value": 31.069999999999997,
"stderr": 0.0343
},
{
"prompt": "prompt-2",
"metric": "f1",
"value": 67.58999999999999,
"stderr": 0.0224
},
{
"prompt": "prompt-3",
"metric": "f1",
"value": 61.62,
"stderr": 0.0288
},
{
"prompt": "prompt-4",
"metric": "f1",
"value": 71.88,
"stderr": 0.0236
},
{
"prompt": "prompt-5",
"metric": "f1",
"value": 67.44,
"stderr": 0.0205
},
{
"prompt": "prompt-6",
"metric": "f1",
"value": 49.79,
"stderr": 0.0284
}
],
"average_accuracy": 58.23166666666666,
"best_prompt": 71.88,
"prompt_id": "prompt-4",
"CPS": 62.069578,
"std_accuracy": 15.35674108223052
},
"MAIA-MC": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-3",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-4",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-5",
"metric": "acc",
"value": null,
"stderr": null
},
{
"prompt": "prompt-6",
"metric": "acc",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
},
"MAIA-GEN": {
"prompts": [
{
"prompt": "prompt-1",
"metric": "rouge1",
"value": null,
"stderr": null
},
{
"prompt": "prompt-2",
"metric": "rouge1",
"value": null,
"stderr": null
}
],
"average_accuracy": null,
"best_prompt": null,
"prompt_id": null,
"CPS": null,
"std_accuracy": null
}
}
}
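
The derived fields in each task block are reproducible from the per-prompt scores. Below is a minimal Python sketch of that arithmetic; the CPS formula, best * (1 - (best - avg) / 100), is inferred from the recorded values (it reproduces every non-null CPS in this file) rather than taken from official Evalita documentation, and the function names and filename are illustrative, not part of any published API.

```python
import json
from statistics import mean, stdev

def task_summary(prompts):
    """Recompute a task's derived fields from its per-prompt scores.

    CPS discounts the best prompt's score by the gap between best and
    average accuracy: best * (1 - (best - avg) / 100). This formula is
    inferred from the recorded values, not from official documentation.
    """
    values = [p["value"] for p in prompts]
    avg = mean(values)
    best = max(values)
    return {
        "average_accuracy": avg,
        "best_prompt": best,
        # On ties (e.g. the two 98.25 scores in "faq") the first prompt wins.
        "prompt_id": prompts[values.index(best)]["prompt"],
        "CPS": best * (1 - (best - avg) / 100),
        # Sample standard deviation (n - 1 denominator) matches the file.
        "std_accuracy": stdev(values),
    }

def average_cps(tasks):
    """Top-level average_CPS: plain mean of CPS over tasks with scores."""
    return mean(t["CPS"] for t in tasks.values() if t["CPS"] is not None)

# Usage (the filename is illustrative):
with open("OpenGVLab_InternVL3_5-8B_None.json") as f:
    results = json.load(f)

print(task_summary(results["tasks"]["admission-test"]["prompts"])["CPS"])
# -> 52.7337..., matching the recorded value
print(average_cps(results["tasks"]))
# -> 46.54586..., matching the recorded average_CPS
```

Note that average_CPS averages only the ten tasks with non-null scores; the MAIA-MC and MAIA-GEN blocks, whose values are all null, are excluded.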