ygorg committed on
Commit
1198432
·
1 Parent(s): 0123084

Keep original files for reproduction.

Browse files
Files changed (1) hide show
  1. _attic/PxCorpus.py +178 -0
_attic/PxCorpus.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# pip install bs4 syntok

import os
import random

import datasets

import numpy as np
from bs4 import BeautifulSoup, ResultSet  # NOTE(review): not referenced in this file — confirm before removing
from syntok.tokenizer import Tokenizer

# Module-level tokenizer instance; not referenced elsewhere in this file.
tokenizer = Tokenizer()

# BibTeX entry for the PxSLU paper (LREC 2022); surfaced via DatasetInfo.citation.
_CITATION = r"""\
@InProceedings{Kocabiyikoglu2022,
author = "Alican Kocabiyikoglu and Fran{\c c}ois Portet and Prudence Gibert and Hervé Blanchon and Jean-Marc Babouchkine and Gaëtan Gavazzi",
title = "A Spoken Drug Prescription Dataset in French for Spoken Language Understanding",
booktitle = "13th Language Resources and Evaluation Conference (LREC 2022)",
year = "2022",
location = "Marseille, France"
}
"""

# Human-readable corpus description; surfaced via DatasetInfo.description.
_DESCRIPTION = """\
PxSLU is to the best of our knowledge, the first spoken medical drug prescriptions corpus to be distributed. It contains 4 hours of transcribed
and annotated dialogues of drug prescriptions in French acquired through an experiment with 55 participants experts and non-experts in drug prescriptions.

The automatic transcriptions were verified by human effort and aligned with semantic labels to allow training of NLP models. The data acquisition
protocol was reviewed by medical experts and permit free distribution without breach of privacy and regulation.

Overview of the Corpus

The experiment has been performed in wild conditions with naive participants and medical experts. In total, the dataset includes 1981 recordings
of 55 participants (38% non-experts, 25% doctors, 36% medical practitioners), manually transcribed and semantically annotated.
"""

# Direct-download link for the PxSLU archive hosted on Zenodo.
_URL = "https://zenodo.org/record/6524162/files/pxslu.zip?download=1"
39
+
40
class PxCorpus(datasets.GeneratorBasedBuilder):
    """Dataset builder for PxSLU, a French spoken drug-prescription corpus.

    All three splits (train/validation/test) are derived from the same three
    source files; `_generate_examples` performs a deterministic 70/10/20
    split using a fixed random seed.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version="1.0.0", description="PxCorpus data"),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the dataset metadata: feature schema, description, citation."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "label": datasets.features.ClassLabel(
                    names=["medical_prescription", "negate", "none", "replace"],
                ),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=['O', 'B-A', 'B-cma_event', 'B-d_dos_form', 'B-d_dos_form_ext', 'B-d_dos_up', 'B-d_dos_val', 'B-dos_cond', 'B-dos_uf', 'B-dos_val', 'B-drug', 'B-dur_ut', 'B-dur_val', 'B-fasting', 'B-freq_days', 'B-freq_int_v1', 'B-freq_int_v1_ut', 'B-freq_int_v2', 'B-freq_int_v2_ut', 'B-freq_startday', 'B-freq_ut', 'B-freq_val', 'B-inn', 'B-max_unit_uf', 'B-max_unit_ut', 'B-max_unit_val', 'B-min_gap_ut', 'B-min_gap_val', 'B-qsp_ut', 'B-qsp_val', 'B-re_ut', 'B-re_val', 'B-rhythm_hour', 'B-rhythm_perday', 'B-rhythm_rec_ut', 'B-rhythm_rec_val', 'B-rhythm_tdte', 'B-roa', 'I-cma_event', 'I-d_dos_form', 'I-d_dos_form_ext', 'I-d_dos_up', 'I-d_dos_val', 'I-dos_cond', 'I-dos_uf', 'I-dos_val', 'I-drug', 'I-fasting', 'I-freq_startday', 'I-inn', 'I-rhythm_tdte', 'I-roa'],
                    ),
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            citation=_CITATION,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the corpus and declare the three splits.

        Every split reads the same three files; the `split` kwarg tells
        `_generate_examples` which subset of examples to yield.
        """
        data_dir = dl_manager.download_and_extract(_URL)

        # Shared file paths for all splits.
        common_kwargs = {
            "filepath_1": os.path.join(data_dir, "seq.in"),
            "filepath_2": os.path.join(data_dir, "seq.label"),
            "filepath_3": os.path.join(data_dir, "PxSLU_conll.txt"),
        }

        return [
            datasets.SplitGenerator(
                name=name,
                gen_kwargs={**common_kwargs, "split": split},
            )
            for name, split in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def getTokenTags(self, document):
        """Parse one CoNLL-style document: one "token<TAB>tag" pair per line.

        Blank lines are skipped. Returns two parallel lists
        (tokens, ner_tags). Raises ValueError if a non-empty line has no tab.
        """
        tokens = []
        ner_tags = []

        for pair in document.split("\n"):
            if not pair:
                continue
            text, label = pair.split("\t")
            tokens.append(text)
            ner_tags.append(label)

        return tokens, ner_tags

    def _generate_examples(self, filepath_1, filepath_2, filepath_3, split):
        """Yield (key, example) pairs belonging to the requested split.

        filepath_1: `seq.in` — one utterance text per line.
        filepath_2: `seq.label` — one intent label per line, aligned with seq.in.
        filepath_3: `PxSLU_conll.txt` — token/tag documents separated by blank lines.
        split: one of "train", "validation", "test".
        """
        all_res = []
        key = 0

        # Context managers + explicit encoding: no leaked handles, and the
        # files decode the same way regardless of the platform locale.
        with open(filepath_1, "r", encoding="utf-8") as f_seq_in:
            seq_in = f_seq_in.read().split("\n")

        with open(filepath_2, "r", encoding="utf-8") as f_seq_label:
            seq_label = f_seq_label.read().split("\n")

        with open(filepath_3, "r", encoding="utf-8") as f_in_ner:
            docs = f_in_ner.read().split("\n\n")

        for idx, doc in enumerate(docs):

            text = seq_in[idx]
            label = seq_label[idx]

            tokens, ner_tags = self.getTokenTags(doc)

            # Skip alignment gaps / trailing empty lines.
            if not text or not label:
                continue

            all_res.append({
                "id": key,
                "text": text,
                "label": label,
                "tokens": tokens,
                "ner_tags": ner_tags,
            })

            key += 1

        ids = [r["id"] for r in all_res]

        # Fixed seed and exactly three shuffles keep the split assignment
        # reproducible across runs; do NOT change the number of shuffle
        # calls, it would silently move examples between splits.
        random.seed(4)
        random.shuffle(ids)
        random.shuffle(ids)
        random.shuffle(ids)

        # 70% train / 10% validation / 20% test.
        train, validation, test = np.split(ids, [int(len(ids) * .7), int(len(ids) * .8)])

        if split == "train":
            allowed_ids = set(train)
        elif split == "validation":
            allowed_ids = set(validation)
        elif split == "test":
            allowed_ids = set(test)
        else:
            raise ValueError(f"Unknown split: {split!r}")

        # Set membership makes the filter O(1) per example.
        for r in all_res:
            if r["id"] in allowed_ids:
                yield r["id"], r