Duplicate from qanastek/MORFITT
Browse files — Co-authored-by: Yanis Labrak <qanastek@users.noreply.huggingface.co>
- .gitattributes +54 -0
- MORFITT.py +111 -0
- README.md +120 -0
- data.zip +3 -0
- distributions_co-references-fixed.png +3 -0
- distributions_nbr_elements_colors.png +3 -0
- test_dataset.py +14 -0
.gitattributes
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
# Audio files - uncompressed
|
| 37 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
# Audio files - compressed
|
| 41 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 43 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 44 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
# Image files - uncompressed
|
| 47 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 48 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 49 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 50 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 51 |
+
# Image files - compressed
|
| 52 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 53 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 54 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
MORFITT.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import random
|
| 4 |
+
|
| 5 |
+
import datasets
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pandas as pd
|
| 8 |
+
|
| 9 |
+
_CITATION = """\
|
| 10 |
+
ddd
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
_DESCRIPTION = """\
|
| 14 |
+
This article presents MORFITT, the first multi-label corpus in French annotated in
|
| 15 |
+
specialties in the medical field. MORFITT is composed of 3~624 abstracts of scientific
|
| 16 |
+
articles from PubMed, annotated in 12 specialties for a total of 5,116 annotations.
|
| 17 |
+
We detail the corpus, the experiments and the preliminary results obtained using a
|
| 18 |
+
classifier based on the pre-trained language model CamemBERT. These preliminary results
|
| 19 |
+
demonstrate the difficulty of the task, with a weighted average F1-score of 61.78%.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
_HOMEPAGE = "ddd"
|
| 23 |
+
|
| 24 |
+
_URL = "https://huggingface.co/datasets/Dr-BERT/MORFITT/resolve/main/data.zip"
|
| 25 |
+
|
| 26 |
+
_LICENSE = "unknown"
|
| 27 |
+
|
| 28 |
+
_SPECIALITIES = ['microbiology', 'etiology', 'virology', 'physiology', 'immunology', 'parasitology', 'genetics', 'chemistry', 'veterinary', 'surgery', 'pharmacology', 'psychology']
|
| 29 |
+
|
| 30 |
+
class MORFITT(datasets.GeneratorBasedBuilder):
    """Dataset loader for MORFITT, a French multi-label corpus of PubMed
    abstracts annotated with medical specialties.

    Each example carries the raw abstract, its specialty labels, and a
    one-hot encoding of those labels over ``_SPECIALITIES``.
    """

    DEFAULT_CONFIG_NAME = "source"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="source",
            version="1.0.0",
            description="The MORFITT corpora",
        ),
    ]

    def _info(self):
        """Return the DatasetInfo describing the features of every example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "abstract": datasets.Value("string"),
                    # Multi-label: a sequence of class labels per example.
                    "specialities": datasets.Sequence(
                        datasets.features.ClassLabel(names=_SPECIALITIES),
                    ),
                    # Parallel one-hot encoding over _SPECIALITIES.
                    "specialities_one_hot": datasets.Sequence(
                        datasets.Value("float"),
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the data archive and declare the three splits."""
        data_dir = dl_manager.download_and_extract(_URL).rstrip("/")

        split_specs = [
            (datasets.Split.TRAIN, "train.tsv", "train"),
            (datasets.Split.VALIDATION, "dev.tsv", "validation"),
            (datasets.Split.TEST, "test.tsv", "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "tsv_file": data_dir + "/" + file_name,
                    "split": split_tag,
                },
            )
            for split_name, file_name, split_tag in split_specs
        ]

    def _generate_examples(self, tsv_file, split):
        """Yield ``(key, example)`` pairs from one TSV split file.

        The TSV is expected to have ``identifier``, ``abstract`` and
        ``specialities`` columns, the latter pipe-delimited
        (e.g. ``"surgery|genetics"``).
        """
        frame = pd.read_csv(tsv_file, sep="\t")

        for _, row in frame.iterrows():
            labels = row["specialities"].split("|")

            # One-hot encode the labels over the fixed vocabulary.
            encoded = [0.0] * len(_SPECIALITIES)
            for label in labels:
                encoded[_SPECIALITIES.index(label)] = 1.0

            yield row["identifier"], {
                "id": row["identifier"],
                "abstract": row["abstract"],
                "specialities": labels,
                "specialities_one_hot": encoded,
            }
|
README.md
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: apache-2.0
|
| 3 |
+
task_categories:
|
| 4 |
+
- text-classification
|
| 5 |
+
language:
|
| 6 |
+
- fr
|
| 7 |
+
tags:
|
| 8 |
+
- medical
|
| 9 |
+
- biology
|
| 10 |
+
pretty_name: MORFITT
|
| 11 |
+
size_categories:
|
| 12 |
+
- 1K<n<10K
|
| 13 |
+
---
|
| 14 |
+
|
| 15 |
+
# MORFITT
|
| 16 |
+
|
| 17 |
+
## Data ([Zenodo](https://zenodo.org/record/7893841#.ZFLFDnZBybg)) | Publication ([HAL](https://hal.science/hal-04131591/))
|
| 18 |
+
[Yanis LABRAK](https://www.linkedin.com/in/yanis-labrak-8a7412145/), [Richard DUFOUR](https://cv.hal.science/richard-dufour), [Mickaël ROUVIER](https://cv.hal.science/mickael-rouvier)
|
| 19 |
+
|
| 20 |
+
[](https://colab.research.google.com/drive/115EixHBcjf-se6xQeaTwZWE1i4idTNbm?usp=sharing) or [](https://github.com/qanastek/MORFITT/blob/main/TrainTransformers.py)
|
| 21 |
+
|
| 22 |
+
We introduce MORFITT, the first multi-label corpus for the classification of specialties in the medical field, in French. MORFITT is composed of 3,624 summaries of scientific articles from PubMed, annotated in 12 specialties. The article details the corpus, the experiments and the preliminary results obtained using a classifier based on the pre-trained language model CamemBERT.
|
| 23 |
+
|
| 24 |
+
For more details, please refer to our paper:
|
| 25 |
+
|
| 26 |
+
**MORFITT: A multi-label topic classification for French Biomedical literature** ([HAL](https://hal.science/hal-04131591/))
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Key Features
|
| 30 |
+
|
| 31 |
+
## Documents distribution
|
| 32 |
+
|
| 33 |
+
| Train | Dev | Test |
|
| 34 |
+
|-------|-------|-------|
|
| 35 |
+
| 1,514 | 1,022 | 1,088 |
|
| 36 |
+
|
| 37 |
+
## Multi-label distribution
|
| 38 |
+
|
| 39 |
+
| | Train | Dev | Test | Total |
|
| 40 |
+
|:----------------------:|:--------------:|:--------------:|:--------------:|:--------------:|
|
| 41 |
+
| Vétérinaire | 320 | 250 | 254 | 824 |
|
| 42 |
+
| Étiologie | 317 | 202 | 222 | 741 |
|
| 43 |
+
| Psychologie | 255 | 175 | 179 | 609 |
|
| 44 |
+
| Chirurgie | 223 | 169 | 157 | 549 |
|
| 45 |
+
| Génétique | 207 | 139 | 159 | 505 |
|
| 46 |
+
| Physiologie | 217 | 125 | 148 | 490 |
|
| 47 |
+
| Pharmacologie | 112 | 84 | 103 | 299 |
|
| 48 |
+
| Microbiologie | 115 | 72 | 86 | 273 |
|
| 49 |
+
| Immunologie | 106 | 86 | 70 | 262 |
|
| 50 |
+
| Chimie | 94 | 53 | 65 | 212 |
|
| 51 |
+
| Virologie | 76 | 57 | 67 | 200 |
|
| 52 |
+
| Parasitologie | 68 | 34 | 50 | 152 |
|
| 53 |
+
| Total | 2,110 | 1,446 | 1,560 | 5,116 |
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
## Number of labels per document distribution
|
| 57 |
+
|
| 58 |
+
<p align="left">
|
| 59 |
+
<img src="https://github.com/qanastek/MORFITT/raw/main/images/distributions_nbr_elements_colors.png" alt="drawing" width="400"/>
|
| 60 |
+
</p>
|
| 61 |
+
|
| 62 |
+
## Co-occurences distribution
|
| 63 |
+
|
| 64 |
+
<p align="left">
|
| 65 |
+
<img src="https://github.com/qanastek/MORFITT/raw/main/images/distributions_co-references-fixed.png" alt="drawing" width="400"/>
|
| 66 |
+
</p>
|
| 67 |
+
|
| 68 |
+
# If you use HuggingFace Transformers
|
| 69 |
+
|
| 70 |
+
```python
|
| 71 |
+
from datasets import load_dataset
|
| 72 |
+
dataset = load_dataset("qanastek/MORFITT")
|
| 73 |
+
print(dataset)
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
or
|
| 77 |
+
|
| 78 |
+
```python
|
| 79 |
+
from datasets import load_dataset
|
| 80 |
+
dataset_base = load_dataset(
|
| 81 |
+
'csv',
|
| 82 |
+
data_files={
|
| 83 |
+
'train': f"./train.tsv",
|
| 84 |
+
'validation': f"./dev.tsv",
|
| 85 |
+
'test': f"./test.tsv",
|
| 86 |
+
},
|
| 87 |
+
delimiter="\t",
|
| 88 |
+
)
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
# License and Citation
|
| 92 |
+
|
| 93 |
+
The code is under [Apache-2.0 License](./LICENSE).
|
| 94 |
+
|
| 95 |
+
The MORFITT dataset is licensed under *Attribution-ShareAlike 4.0 International* ([CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)).
|
| 96 |
+
If you find this project useful in your research, please cite the following papers:
|
| 97 |
+
|
| 98 |
+
```plain
|
| 99 |
+
Labrak, Y., Rouvier, M., & Dufour, R. (2023). MORFITT : Un corpus multi-labels d’articles scientifiques français dans le domaine biomédical. In F. Boudin, B. Daille, R. Dufour, O. Khettari, M. Houbre, L. Jourdan, & N. Kooli (Eds.), 18e Conférence en Recherche d’Information et Applications – 16e Rencontres Jeunes Chercheurs en RI – 30e Conférence sur le Traitement Automatique des Langues Naturelles – 25e Rencontre des Étudiants Chercheurs en Informatique pour le Traitement Automatique des Langues (pp. 66–70). ATALA. https://hal.science/hal-04131591
|
| 100 |
+
```
|
| 101 |
+
|
| 102 |
+
or using the bibtex:
|
| 103 |
+
|
| 104 |
+
```bibtex
|
| 105 |
+
@inproceedings{labrak:hal-04131591,
|
| 106 |
+
TITLE = {{MORFITT : Un corpus multi-labels d'articles scientifiques fran{\c c}ais dans le domaine biom{\'e}dical}},
|
| 107 |
+
AUTHOR = {Labrak, Yanis and Rouvier, Mickael and Dufour, Richard},
|
| 108 |
+
URL = {https://hal.science/hal-04131591},
|
| 109 |
+
BOOKTITLE = {{18e Conf{\'e}rence en Recherche d'Information et Applications -- 16e Rencontres Jeunes Chercheurs en RI -- 30e Conf{\'e}rence sur le Traitement Automatique des Langues Naturelles -- 25e Rencontre des {\'E}tudiants Chercheurs en Informatique pour le Traitement Automatique des Langues}},
|
| 110 |
+
ADDRESS = {Paris, France},
|
| 111 |
+
EDITOR = {Boudin, Florian and Daille, B{\'e}atrice and Dufour, Richard and Khettari, Oumaima and Houbre, Ma{\"e}l and Jourdan, L{\'e}ane and Kooli, Nihel},
|
| 112 |
+
PUBLISHER = {{ATALA}},
|
| 113 |
+
PAGES = {66-70},
|
| 114 |
+
YEAR = {2023},
|
| 115 |
+
KEYWORDS = {Analyse de documents scientifiques ; Jeux de donn{\'e}es compos{\'e}s des textes scientifiques},
|
| 116 |
+
PDF = {https://hal.science/hal-04131591/file/1465546.pdf},
|
| 117 |
+
HAL_ID = {hal-04131591},
|
| 118 |
+
HAL_VERSION = {v1},
|
| 119 |
+
}
|
| 120 |
+
```
|
data.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8fbb984a3eaa51fd58e3093b96f33b81de5114c2a0b98532d87bcc06341de95b
|
| 3 |
+
size 2017743
|
distributions_co-references-fixed.png
ADDED
|
Git LFS Details
|
distributions_nbr_elements_colors.png
ADDED
|
Git LFS Details
|
test_dataset.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datasets import load_dataset

# Smoke-test for the local MORFITT loading script.
# Fix: the loader file is named MORFITT.py — "morfitt.py" fails on
# case-sensitive filesystems.
dataset = load_dataset("MORFITT.py")

print("#" * 50)
print(dataset)
print("#" * 50)
print(dataset["train"])
print("#" * 50)
print(dataset["train"][0])
print("#" * 50)

# Fix: the loader declares the label feature as "specialities" (see
# MORFITT._info); there is no "labels" feature, so the old key raised KeyError.
label_list = dataset["train"].features["specialities"].feature.names
print(label_list)
|