Aremaki committed on
Commit
b185097
·
verified ·
1 Parent(s): 7b16aad

Upload folder using huggingface_hub

Browse files
.ipynb_checkpoints/EMEA-checkpoint.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import datasets
3
+
4
class MyDatasetConfig(datasets.BuilderConfig):
    """Builder config selecting the "original" or "processed" variant.

    Carries no extra fields beyond ``datasets.BuilderConfig``; the previous
    no-op ``__init__`` that only delegated to ``super().__init__(**kwargs)``
    was redundant and has been removed (the inherited constructor is used).
    """
8
+
9
class MyDataset(datasets.GeneratorBasedBuilder):
    """Dataset loader with two configs, "original" and "processed".

    Each config reads its train/validation/test Parquet shards from the
    directory ``<config_name>_data/`` and yields one example per Parquet row.
    """

    BUILDER_CONFIGS = [
        MyDatasetConfig(
            name="original",
            version=datasets.Version("1.0.0"),
            description="Original dataset",
        ),
        MyDatasetConfig(
            name="processed",
            version=datasets.Version("1.0.0"),
            description="Processed dataset",
        ),
    ]

    def _info(self):
        # Empty DatasetInfo: features are inferred automatically from the
        # Parquet schema when examples are generated.
        return datasets.DatasetInfo()

    def _split_generators(self, dl_manager):
        """Resolve the three Parquet shards for the active config and map them to splits."""
        data_dir = self.config.name + "_data"
        data_files = {
            "train": os.path.join(data_dir, "train-00000-of-00001.parquet"),
            "validation": os.path.join(data_dir, "validation-00000-of-00001.parquet"),
            "test": os.path.join(data_dir, "test-00000-of-00001.parquet"),
        }
        downloaded_files = dl_manager.download(data_files)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": downloaded_files[key]},
            )
            for split_name, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs, one plain row dict per Parquet row.

        Bug fix: the original ``table.slice(i, 1).to_pydict()`` returns a
        one-row *column* dict ({"col": [value]}), wrapping every field in a
        length-1 list and corrupting the inferred schema; it also performed
        an O(n) slice per row. ``Table.to_pylist()`` yields row dicts directly.
        """
        # Local import keeps pyarrow an optional dependency at module load.
        import pyarrow.parquet as pq

        table = pq.read_table(filepath)
        for idx, row in enumerate(table.to_pylist()):
            yield idx, row
EMEA.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import datasets
3
+
4
class MyDatasetConfig(datasets.BuilderConfig):
    """Builder config selecting the "original" or "processed" variant.

    Carries no extra fields beyond ``datasets.BuilderConfig``; the previous
    no-op ``__init__`` that only delegated to ``super().__init__(**kwargs)``
    was redundant and has been removed (the inherited constructor is used).
    """
8
+
9
class MyDataset(datasets.GeneratorBasedBuilder):
    """Dataset loader with two configs, "original" and "processed".

    Each config reads its train/validation/test Parquet shards from the
    directory ``<config_name>_data/`` and yields one example per Parquet row.
    """

    BUILDER_CONFIGS = [
        MyDatasetConfig(
            name="original",
            version=datasets.Version("1.0.0"),
            description="Original dataset",
        ),
        MyDatasetConfig(
            name="processed",
            version=datasets.Version("1.0.0"),
            description="Processed dataset",
        ),
    ]

    def _info(self):
        # Empty DatasetInfo: features are inferred automatically from the
        # Parquet schema when examples are generated.
        return datasets.DatasetInfo()

    def _split_generators(self, dl_manager):
        """Resolve the three Parquet shards for the active config and map them to splits."""
        data_dir = self.config.name + "_data"
        data_files = {
            "train": os.path.join(data_dir, "train-00000-of-00001.parquet"),
            "validation": os.path.join(data_dir, "validation-00000-of-00001.parquet"),
            "test": os.path.join(data_dir, "test-00000-of-00001.parquet"),
        }
        downloaded_files = dl_manager.download(data_files)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": downloaded_files[key]},
            )
            for split_name, key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs, one plain row dict per Parquet row.

        Bug fix: the original ``table.slice(i, 1).to_pydict()`` returns a
        one-row *column* dict ({"col": [value]}), wrapping every field in a
        length-1 list and corrupting the inferred schema; it also performed
        an O(n) slice per row. ``Table.to_pylist()`` yields row dicts directly.
        """
        # Local import keeps pyarrow an optional dependency at module load.
        import pyarrow.parquet as pq

        table = pq.read_table(filepath)
        for idx, row in enumerate(table.to_pylist()):
            yield idx, row
README.md CHANGED
@@ -1,3 +1,59 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ configs:
4
+ - config_name: default
5
+ data_files:
6
+ - split: train
7
+ path: original_data/train-*
8
+ - split: validation
9
+ path: original_data/validation-*
10
+ - split: test
11
+ path: original_data/test-*
12
+ task_categories:
13
+ - text-classification
14
+ - text-generation
15
+ - zero-shot-classification
16
+ - token-classification
17
+ language:
18
+ - fr
19
+ tags:
20
+ - UMLS
21
+ - entity_linking
22
+ - biomedical
23
+ size_categories:
24
+ - 1K<n<10K
25
+ dataset_info:
26
+ features:
27
+ - name: id
28
+ dtype: string
29
+ - name: document_id
30
+ dtype: string
31
+ - name: entities
32
+ list:
33
+ - name: id
34
+ dtype: string
35
+ - name: normalized
36
+ list:
37
+ - name: db_id
38
+ dtype: string
39
+ - name: db_name
40
+ dtype: string
41
+ - name: offsets
42
+ sequence:
43
+ sequence: int64
44
+ - name: text
45
+ sequence: string
46
+ - name: type
47
+ dtype: string
48
+ - name: passages
49
+ list:
50
+ - name: id
51
+ dtype: string
52
+ - name: offsets
53
+ sequence:
54
+ sequence: int64
55
+ - name: text
56
+ sequence: string
57
+ - name: type
58
+ dtype: string
59
+ ---
original_data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edc995c4f6fcf164dabdae65d40e57131e13026d2f7c54d8499a8fb480c6ca00
3
+ size 82297
original_data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bec64ac095eb3fbc03a176ab8cfdaa18b4279aaf77aeceea5657d21b9df9e59b
3
+ size 112647
original_data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b14078d3b7d5841430f32cfcd6a8dd89e839016261fee6996a83949d9a0ab90
3
+ size 93576
processed_data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:835de8984ce3671574f60f6564fe23668e6fee8627cd734b2f12121e6115c329
3
+ size 81212
processed_data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce3f6fdff68245dc8155fd7366a610325a3b5f8a26b96b859b500b3a1f4ed398
3
+ size 111266
processed_data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d6e02f75376dd604f5afbd60810013ab6f4234a0608aa3d47363b8fb1af5329
3
+ size 91948