{ "average_CPS": 49.301524, "config": { "model_name": "CohereForAI/aya-expanse-8b", "num_fewshot": "5", "batch_size": 1, "model": "CohereForAI/aya-expanse-8b", "base_model": "CohereForCausalLM", "revision": "554c52e22d0f713bab9d3e360734d25cd15dda16", "multimodal": false, "submitted_time": "2024-10-23 06:34:13+00:00", "num_params_billion": 8.028033024, "language": "en_fr_de_es_it_pt_ja_ko_zh_ar_el_fa_pl_id_cs_he_hi_nl_ro_ru_tr_uk_vi" }, "tasks": { "admission-test": { "prompts": [ { "prompt": "prompt-1", "metric": "acc", "value": 37.4, "stderr": 0.0217 }, { "prompt": "prompt-2", "metric": "acc", "value": 37.6, "stderr": 0.0217 }, { "prompt": "prompt-3", "metric": "acc", "value": 62.8, "stderr": 0.0216 }, { "prompt": "prompt-4", "metric": "acc", "value": 63.4, "stderr": 0.0216 }, { "prompt": "prompt-5", "metric": "acc", "value": 38.6, "stderr": 0.0218 }, { "prompt": "prompt-6", "metric": "acc", "value": 37.8, "stderr": 0.0217 } ], "average_accuracy": 46.26666666666667, "best_prompt": 63.4, "prompt_id": "prompt-4", "CPS": 52.537466666666674, "std_accuracy": 13.046787599507653 }, "faq": { "prompts": [ { "prompt": "prompt-1", "metric": "acc", "value": 31.669999999999998, "stderr": 0.0233 }, { "prompt": "prompt-2", "metric": "acc", "value": 31.169999999999998, "stderr": 0.0232 }, { "prompt": "prompt-3", "metric": "acc", "value": 92.02, "stderr": 0.0135 }, { "prompt": "prompt-4", "metric": "acc", "value": 94.01, "stderr": 0.0119 }, { "prompt": "prompt-5", "metric": "acc", "value": 31.169999999999998, "stderr": 0.0232 }, { "prompt": "prompt-6", "metric": "acc", "value": 31.169999999999998, "stderr": 0.0232 } ], "average_accuracy": 51.86833333333334, "best_prompt": 94.01, "prompt_id": "prompt-4", "CPS": 54.39261916666667, "std_accuracy": 31.878871006776052 }, "hate-speech-detection": { "prompts": [ { "prompt": "prompt-1", "metric": "f1", "value": 61.919999999999995, "stderr": 0.0136 }, { "prompt": "prompt-2", "metric": "f1", "value": 66.52, "stderr": 0.0126 }, { "prompt": "prompt-3", "metric": "f1", "value": 70.95, "stderr": 0.0116 }, { "prompt": "prompt-4", "metric": "f1", "value": 72.71, "stderr": 0.0115 }, { "prompt": "prompt-5", "metric": "f1", "value": 61.83, "stderr": 0.0136 }, { "prompt": "prompt-6", "metric": "f1", "value": 61.06, "stderr": 0.0136 } ], "average_accuracy": 65.83166666666666, "best_prompt": 72.71, "prompt_id": "prompt-4", "CPS": 67.70876383333334, "std_accuracy": 5.06094622246341 }, "lexical-substitution": { "prompts": [ { "prompt": "prompt-1", "metric": "f1", "value": 20.14, "stderr": null }, { "prompt": "prompt-2", "metric": "f1", "value": 20.599999999999998, "stderr": null } ], "average_accuracy": 20.369999999999997, "best_prompt": 20.599999999999998, "prompt_id": "prompt-2", "CPS": 20.552619999999997, "std_accuracy": 0.32526911934580993 }, "evalita NER": { "prompts": [ { "prompt": "prompt-1", "metric": "acc", "value": 35.57788293746971, "stderr": null }, { "prompt": "prompt-2", "metric": "acc", "value": 35.76877120698013, "stderr": null } ], "average_accuracy": 35.67332707222492, "best_prompt": 35.76877120698013, "prompt_id": "prompt-2", "CPS": 35.734632012789056, "std_accuracy": 0.13497838981978103 }, "relation-extraction": { "prompts": [ { "prompt": "prompt-1", "metric": "f1", "value": 36.559999999999995, "stderr": null }, { "prompt": "prompt-2", "metric": "f1", "value": 36.559999999999995, "stderr": null } ], "average_accuracy": 36.559999999999995, "best_prompt": 36.559999999999995, "prompt_id": "prompt-1", "CPS": 36.559999999999995, "std_accuracy": 0.0 }, 
"sentiment-analysis": { "prompts": [ { "prompt": "prompt-1", "metric": "f1", "value": 71.67999999999999, "stderr": null }, { "prompt": "prompt-2", "metric": "f1", "value": 70.12, "stderr": null }, { "prompt": "prompt-3", "metric": "f1", "value": 71.36, "stderr": null }, { "prompt": "prompt-4", "metric": "f1", "value": 72.36, "stderr": null }, { "prompt": "prompt-5", "metric": "f1", "value": 73.3, "stderr": null }, { "prompt": "prompt-6", "metric": "f1", "value": 72.59, "stderr": null } ], "average_accuracy": 71.90166666666669, "best_prompt": 73.3, "prompt_id": "prompt-5", "CPS": 72.27502166666667, "std_accuracy": 1.1096020307599763 }, "summarization-fanpage": { "prompts": [ { "prompt": "prompt-1", "metric": "rouge1", "value": 18.33, "stderr": 0.0063 }, { "prompt": "prompt-2", "metric": "rouge1", "value": 18.22, "stderr": 0.0061 } ], "average_accuracy": 18.275, "best_prompt": 18.33, "prompt_id": "prompt-1", "CPS": 18.319918499999996, "std_accuracy": 0.07778174593051983 }, "text-entailment": { "prompts": [ { "prompt": "prompt-1", "metric": "acc", "value": 75.25, "stderr": 0.0216 }, { "prompt": "prompt-2", "metric": "acc", "value": 76.25, "stderr": 0.0213 }, { "prompt": "prompt-3", "metric": "acc", "value": 74.25, "stderr": 0.0219 }, { "prompt": "prompt-4", "metric": "acc", "value": 72.5, "stderr": 0.0224 }, { "prompt": "prompt-5", "metric": "acc", "value": 67.75, "stderr": 0.0234 }, { "prompt": "prompt-6", "metric": "acc", "value": 65.25, "stderr": 0.0238 } ], "average_accuracy": 71.875, "best_prompt": 76.25, "prompt_id": "prompt-2", "CPS": 72.9140625, "std_accuracy": 4.415172703303915 }, "word-in-context": { "prompts": [ { "prompt": "prompt-1", "metric": "f1", "value": 58.75, "stderr": 0.0272 }, { "prompt": "prompt-2", "metric": "f1", "value": 58.46, "stderr": 0.0269 }, { "prompt": "prompt-3", "metric": "f1", "value": 66.97999999999999, "stderr": 0.0213 }, { "prompt": "prompt-4", "metric": "f1", "value": 55.16, "stderr": 0.0276 }, { "prompt": "prompt-5", "metric": "f1", "value": 57.589999999999996, "stderr": 0.0262 }, { "prompt": "prompt-6", "metric": "f1", "value": 60.51, "stderr": 0.0245 } ], "average_accuracy": 59.574999999999996, "best_prompt": 66.97999999999999, "prompt_id": "prompt-3", "CPS": 62.02013099999999, "std_accuracy": 4.026525797756669 }, "MAIA-MC": { "prompts": [ { "prompt": "prompt-1", "metric": "acc", "value": null, "stderr": null }, { "prompt": "prompt-2", "metric": "acc", "value": null, "stderr": null }, { "prompt": "prompt-3", "metric": "acc", "value": null, "stderr": null }, { "prompt": "prompt-4", "metric": "acc", "value": null, "stderr": null }, { "prompt": "prompt-5", "metric": "acc", "value": null, "stderr": null }, { "prompt": "prompt-6", "metric": "acc", "value": null, "stderr": null } ], "average_accuracy": null, "best_prompt": null, "prompt_id": null, "CPS": null, "std_accuracy": null }, "MAIA-GEN": { "prompts": [ { "prompt": "prompt-1", "metric": "rouge1", "value": null, "stderr": null }, { "prompt": "prompt-2", "metric": "rouge1", "value": null, "stderr": null } ], "average_accuracy": null, "best_prompt": null, "prompt_id": null, "CPS": null, "std_accuracy": null } } }