ygorg committed on
Commit
d7be25e
·
verified ·
1 Parent(s): 9fe5bed

Delete loading script

Browse files
Files changed (1) hide show
  1. CLISTER.py +0 -159
CLISTER.py DELETED
@@ -1,159 +0,0 @@
1
- import json
2
- import random
3
-
4
- import datasets
5
- import numpy as np
6
- import pandas as pd
7
-
8
# BibTeX entry for the LREC 2022 paper that introduces the CLISTER corpus;
# surfaced verbatim through DatasetInfo.citation in _info().
_CITATION = """\
@inproceedings{hiebel:cea-03740484,
TITLE = {{CLISTER: A corpus for semantic textual similarity in French clinical narratives}},
AUTHOR = {Hiebel, Nicolas and Ferret, Olivier and Fort, Kar{\"e}n and N{\'e}v{\'e}ol, Aur{\'e}lie},
URL = {https://hal-cea.archives-ouvertes.fr/cea-03740484},
BOOKTITLE = {{LREC 2022 - 13th Language Resources and Evaluation Conference}},
ADDRESS = {Marseille, France},
PUBLISHER = {{European Language Resources Association}},
SERIES = {LREC 2022 - Proceedings of the 13th Conference on Language Resources and Evaluation},
VOLUME = {2022},
PAGES = {4306‑4315},
YEAR = {2022},
MONTH = Jun,
KEYWORDS = {Semantic Similarity ; Corpus Development ; Clinical Text ; French ; Semantic Similarity},
PDF = {https://hal-cea.archives-ouvertes.fr/cea-03740484/file/2022.lrec-1.459.pdf},
HAL_ID = {cea-03740484},
HAL_VERSION = {v1},
}
"""
27
-
28
# Human-readable corpus summary; surfaced through DatasetInfo.description
# in _info(). Trailing backslashes join the lines into one paragraph.
_DESCRIPTION = """\
Modern Natural Language Processing relies on the availability of annotated corpora for training and \
evaluating models. Such resources are scarce, especially for specialized domains in languages other \
than English. In particular, there are very few resources for semantic similarity in the clinical domain \
in French. This can be useful for many biomedical natural language processing applications, including \
text generation. We introduce a definition of similarity that is guided by clinical facts and apply it \
to the development of a new French corpus of 1,000 sentence pairs manually annotated according to \
similarity scores. This new sentence similarity corpus is made freely available to the community. We \
further evaluate the corpus through experiments of automatic similarity measurement. We show that a \
model of sentence embeddings can capture similarity with state of the art performance on the DEFT STS \
shared task evaluation data set (Spearman=0.8343). We also show that the CLISTER corpus is complementary \
to DEFT STS. \
"""
41
-
42
# Source repository for the corpus (code, data, and annotation material).
_HOMEPAGE = "https://gitlab.inria.fr/codeine/clister"

# No explicit license is distributed with the corpus.
_LICENSE = "unknown"

# Archive shipped alongside this script; extracted by dl_manager in
# _split_generators to obtain the CSV/JSON data files.
_URL = "data.zip"
47
-
48
-
49
class CLISTER(datasets.GeneratorBasedBuilder):
    """Builder for CLISTER, a French clinical semantic-textual-similarity
    corpus of manually scored sentence pairs.

    The distributed archive only contains a train and a test CSV; the
    validation split is carved deterministically out of the train file in
    ``_generate_examples``.
    """

    DEFAULT_CONFIG_NAME = "source"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="source", version="1.0.0", description="The CLISTER corpora"),
    ]

    def _info(self):
        """Return the DatasetInfo describing one sentence-pair example."""

        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "document_1_id": datasets.Value("string"),
                "document_2_id": datasets.Value("string"),
                "text_1": datasets.Value("string"),
                "text_2": datasets.Value("string"),
                # Manually assigned similarity score for the pair.
                "label": datasets.Value("float"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Extract the data archive and declare the three splits.

        Train and validation both point at ``train.csv``; the actual
        partitioning between them happens in ``_generate_examples``.
        """

        data_dir = dl_manager.download_and_extract(_URL).rstrip("/")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "csv_file": data_dir + "/train.csv",
                    "json_file": data_dir + "/id_to_sentence_train.json",
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "csv_file": data_dir + "/train.csv",
                    "json_file": data_dir + "/id_to_sentence_train.json",
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "csv_file": data_dir + "/test.csv",
                    "json_file": data_dir + "/id_to_sentence_test.json",
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, csv_file, json_file, split):
        """Yield ``(key, example)`` pairs for the requested split.

        Args:
            csv_file: tab-separated file with ``id_1``, ``id_2`` and ``sim``
                columns, one sentence pair per row.
            json_file: JSON mapping from sentence identifier to sentence text.
            split: one of ``"train"``, ``"validation"`` or ``"test"``.
        """

        # Context manager guarantees the handle is closed even if
        # json.load raises (the original opened/closed it manually).
        with open(json_file) as f_json:
            data_map = json.load(f_json)

        df = pd.read_csv(csv_file, sep="\t")

        all_res = []
        # enumerate replaces the hand-rolled `key` counter; the row index
        # from iterrows is unused.
        for key, (_, row) in enumerate(df.iterrows()):
            all_res.append({
                "id": str(key),
                "document_1_id": row["id_1"],
                "document_2_id": row["id_2"],
                # The JSON lookup key is the first two underscore-separated
                # components of the pair identifier.
                "text_1": data_map["_".join(row["id_1"].split("_")[0:2])],
                "text_2": data_map["_".join(row["id_2"].split("_")[0:2])],
                "label": float(row["sim"]),
            })

        if split == "test":
            for r in all_res:
                yield r["id"], r
            return

        # Deterministically partition the train file ~5/6 train, ~1/6
        # validation. The fixed seed and the triple shuffle are kept
        # exactly as released so split membership does not change.
        ids = [r["id"] for r in all_res]
        random.seed(4)
        random.shuffle(ids)
        random.shuffle(ids)
        random.shuffle(ids)

        cut = int(len(ids) * 0.8333)
        # Set gives O(1) membership tests below (the original scanned a
        # list for every example) and any non-"train" value falls through
        # to validation instead of raising NameError on allowed_ids.
        if split == "train":
            allowed_ids = set(ids[:cut])
        else:
            allowed_ids = set(ids[cut:])

        for r in all_res:
            if r["id"] in allowed_ids:
                yield r["id"], r