from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
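    # Field meanings (inferred from usage below and from the standard HF
    # leaderboard template this file follows):
    #   benchmark   - task key as it appears in the results JSON
    #   metric      - metric key in the results JSON (here always "acc")
    #   metric_type - which statistic of the metric to display (CPS, average_accuracy,
    #                 std_accuracy, best_prompt, or prompt_id)
    #   col_name    - column header shown in the leaderboard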
    benchmark: str
    metric: str
    metric_type: str
    col_name: str


class Tasks(Enum):
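    # One enum member per leaderboard column: each benchmark contributes five
    # entries (CPS, prompt average, prompt std, best prompt, best prompt id).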
    task1 = Task("RE_1", "acc", "CPS", "REL-E3C")
    task2 = Task("RE_2", "acc", "average_accuracy", "REL-E3C Prompt Average")
    task3 = Task("RE_5", "acc", "std_accuracy", "REL-E3C Prompt Std")
    task4 = Task("RE_3", "acc", "best_prompt", "REL-E3C Best Prompt")
    task5 = Task("RE_4", "acc", "prompt_id", "REL-E3C Best Prompt Id")

    task6 = Task("NER_1", "acc", "CPS", "NER-E3C")
    task7 = Task("NER_2", "acc", "average_accuracy", "NER-E3C Prompt Average")
    task8 = Task("NER_3", "acc", "std_accuracy", "NER-E3C Prompt Std")
    task9 = Task("NER_4", "acc", "best_prompt", "NER-E3C Best Prompt")
    task10 = Task("NER_5", "acc", "prompt_id", "NER-E3C Best Prompt Id")

    task11 = Task("RML-CRF_1", "acc", "CPS", "CRF-RML")
    task12 = Task("RML-CRF_2", "acc", "average_accuracy", "CRF-RML Prompt Average")
    task13 = Task("RML-CRF_3", "acc", "std_accuracy", "CRF-RML Prompt Std")
    task14 = Task("RML-CRF_4", "acc", "best_prompt", "CRF-RML Best Prompt")
    task15 = Task("RML-CRF_5", "acc", "prompt_id", "CRF-RML Best Prompt Id")

    task16 = Task("DIA-CRF_1", "acc", "CPS", "CRF-DIA")
    task17 = Task("DIA-CRF_2", "acc", "average_accuracy", "CRF-DIA Prompt Average")
    task18 = Task("DIA-CRF_3", "acc", "std_accuracy", "CRF-DIA Prompt Std")
    task19 = Task("DIA-CRF_4", "acc", "best_prompt", "CRF-DIA Best Prompt")
    task20 = Task("DIA-CRF_5", "acc", "prompt_id", "CRF-DIA Best Prompt Id")

    task21 = Task("HIS-CRF_1", "acc", "CPS", "CRF-HIS")
    task22 = Task("HIS-CRF_2", "acc", "average_accuracy", "CRF-HIS Prompt Average")
    task23 = Task("HIS-CRF_3", "acc", "std_accuracy", "CRF-HIS Prompt Std")
    task24 = Task("HIS-CRF_4", "acc", "best_prompt", "CRF-HIS Best Prompt")
    task25 = Task("HIS-CRF_5", "acc", "prompt_id", "CRF-HIS Best Prompt Id")

    task26 = Task("NER-PHARMAER_1", "acc", "CPS", "NER-PHA")
    task27 = Task("NER-PHARMAER_2", "acc", "average_accuracy", "NER-PHA Prompt Average")
    task28 = Task("NER-PHARMAER_3", "acc", "std_accuracy", "NER-PHA Prompt Std")
    task29 = Task("NER-PHARMAER_4", "acc", "best_prompt", "NER-PHA Best Prompt")
    task30 = Task("NER-PHARMAER_5", "acc", "prompt_id", "NER-PHA Best Prompt Id")
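
# Earlier task set, apparently kept for reference from the Evalita-LLM leaderboard
# this file is based on; note it uses the older three-argument Task signature, so it
# would not run against the current four-field dataclass if uncommented.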
'''
task0 = Task("TextualEntailment", "acc", "Textual Entailment")
task1 = Task("TextualEntailment_best", "acc", "TextualEntailment Best")
task2 = Task("Sentiment Analysis", "acc", "Sentiment Analysis")
task3 = Task("Sentiment Analysis_best", "acc", "Sentiment Analysis_best")
task4 = Task("Hate Speech", "acc", "Hate Speech")
task5 = Task("Hate Speech_best", "acc", "Hate Speech_best")
task6 = Task("Admission Test", "acc", "Admission Test")
task7 = Task("Admission Test_best", "acc", "Admission Test_best")
task8 = Task("Word in Context", "acc", "Word in Context")
task9 = Task("Word in Context_best", "acc", "Word in Context_best")
task10 = Task("FAQ", "acc", "FAQ")
task11 = Task("FAQ_best", "acc", "FAQ_best")
task12 = Task("Lexical Substitution", "acc", "Lexical Substitution")
task13 = Task("Lexical Substitution_best", "acc", "Lexical Substitution_best")
task14 = Task("Summarization", "acc", "Summarization")
task15 = Task("Summarization_best", "acc", "Summarization_best")
task16 = Task("NER", "acc", "NER")
task17 = Task("NER_best", "acc", "NER_best")
task18 = Task("REL", "acc", "REL")
task19 = Task("REL_best", "acc", "REL_best")
'''
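
# A minimal sketch (an assumption, not verified code from this project): downstream
# leaderboard code typically derives its display columns from the Tasks enum, along
# the lines of the standard HF leaderboard template. The variable name is illustrative.
BENCHMARK_COLS = [task.value.col_name for task in Tasks]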


TITLE = """<h1 align="center" id="space-title">eCream-LLM Leaderboard</h1>"""

INTRODUCTION_TEXT = """
<br><br><b>The eCream-LLM leaderboard</b>, developed within <a href='https://ecreamproject.eu/'>the eCream Project</a> (enabling Clinical Research in Emergency and Acute care Medicine), is designed to evaluate Large Language Models (LLMs) on several tasks pertaining to the medical domain. Its distinguishing features are:<b> <br> (i) all tasks are implemented for six languages: English, Italian, Slovak, Slovenian, Polish, and Greek; <br> (ii) the leaderboard includes generative tasks, allowing for a more natural interaction with LLMs; <br> (iii) all tasks are evaluated against multiple prompts, thereby mitigating model sensitivity to specific prompts and allowing a fairer evaluation.</b>
<br><br>**<small>Generative tasks:</small>** <small>NER-E3C (Named Entity Recognition - E3C), REL-E3C (Relation Extraction - E3C), CRF-RML, NER-PHA (Named Entity Recognition - PharmaER.IT)</small>
<br>**<small>Multiple-choice tasks:</small>** <small>CRF-DIA (CRF Diagnosis), CRF-HIS (CRF History)</small>
"""


LLM_BENCHMARKS_TEXT = """
### Groups

- `evalita-mp`: All tasks (perplexity and non-perplexity based).
- `evalita-mp_gen`: Only generative tasks.

#### Tasks

The following Evalita-LLM tasks can also be evaluated in isolation:
- `evalita-mp_ner_group`: Named Entity Recognition (NER)
- `evalita-mp_re`: Relation Extraction (REL)

### Usage

```bash
lm_eval --model hf --model_args pretrained=meta-llama/Llama-2-7b-hf --tasks evalita-mp_re --device cuda:0 --batch_size 1
```
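
For a quick smoke test, `lm_eval` also accepts a `--limit` flag that caps the number of examples per task (illustrative invocation; all other arguments as above):

```bash
lm_eval --model hf --model_args pretrained=meta-llama/Llama-2-7b-hf --tasks evalita-mp_re --device cuda:0 --batch_size 1 --limit 10
```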

<!--
### Checklist

* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [x] If yes, does the original paper provide a reference implementation?
* [x] Yes, original implementation contributed by author of the benchmark

If other tasks on this dataset are already supported:
* [x] Is the "Main" variant of this task clearly denoted?
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
-->
"""


EVALUATION_QUEUE_TEXT = """
## Some good practices before submitting a model

### 1) Make sure you can load your model and tokenizer using AutoClasses:
```python
from transformers import AutoConfig, AutoModel, AutoTokenizer
config = AutoConfig.from_pretrained("your model name", revision=revision)
model = AutoModel.from_pretrained("your model name", revision=revision)
tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
```
If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.

Note: make sure your model is public!
Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it, stay posted!

### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
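
One way to do the conversion (a minimal sketch; the output directory name is illustrative, and `safe_serialization=True` is the standard `save_pretrained` option for writing safetensors):

```python
from transformers import AutoModel

model = AutoModel.from_pretrained("your model name")
# Writes model.safetensors instead of pytorch_model.bin
model.save_pretrained("your-model-safetensors", safe_serialization=True)
```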

### 3) Make sure your model has an open license!
This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗

### 4) Fill out your model card
When we add extra information about models to the leaderboard, it will be automatically taken from the model card.

## In case of model failure
If your model is displayed in the `FAILED` category, its execution stopped.
Make sure you have followed the above steps first.
If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to restrict the number of examples per task).
"""


CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@article{magnini2025cost,
  title={A cost-effective approach to counterbalance the scarcity of medical datasets},
  author={Magnini, Bernardo and Farzi, Saeed and Ferrazzi, Pietro and Ghosh, Soumitra and Lavelli, Alberto and Mezzanotte, Giulia and Speranza, Manuela},
  journal={Frontiers in Disaster and Emergency Medicine},
  volume={3},
  pages={1558200},
  year={2025},
  publisher={Frontiers Media SA},
  url={https://www.frontiersin.org/journals/disaster-and-emergency-medicine/articles/10.3389/femer.2025.1558200/full}
}
"""