cnachteg committed on
Commit
611f74c
·
1 Parent(s): 720d5a1

Delete duvel.py

Browse files

Check if automated loading works

Files changed (1) hide show
  1. duvel.py +0 -121
duvel.py DELETED
@@ -1,121 +0,0 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- # TODO: Address all TODOs and remove all explanatory comments
15
- """DUVEL : the Detection of Unlimited Variant Ensemble in Literature"""
16
-
17
-
18
- import csv
19
- import datasets
20
-
21
-
22
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
# NOTE(review): this is still the HF dataset-script template placeholder,
# not a real citation for DUVEL — replace before release.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

# Prose summary shown on the dataset page; trailing backslashes join the
# physical lines into one paragraph inside the string literal.
_DESCRIPTION = """\
This dataset was created to identity oligogenic variant combinations, i.e. relation between several genes and their mutations, \
causing genetic diseases in scientific articles written in english. At the moment, it contains only digenic variant combinations, \
i.e. relations between two genes and at least two variants. The dataset is intended for binary relation extraction where the \
entities are masked within the text.
"""

# Project repository that hosts both the loading script and the raw CSVs.
_HOMEPAGE = "https://github.com/cnachteg/DUVEL"

_LICENSE = "cc-by-nc-sa-4.0"

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URL = "https://raw.githubusercontent.com/cnachteg/DUVEL/main/"
# Split key -> raw CSV URL on the repo's main branch.  Note the "dev" key
# maps to the file named validation.csv.
_URLS = {
    "train": _URL + "data/train.csv",
    "dev": _URL + "data/validation.csv",
    "test": _URL + "data/test.csv"
}
56
class DUVEL(datasets.GeneratorBasedBuilder):
    """DUVEL : the Detection of Unlimited Variant Ensemble in Literature - Version 1.1."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Describe the dataset: column schema, license, citation and task template."""
        # Column schema for every CSV row yielded by _generate_examples.
        # NOTE(review): ClassLabel names are the ints 0/1 here, while most
        # builders use strings — confirm the CSV label column casts as intended.
        feature_spec = datasets.Features(
            {
                'sentence': datasets.Value('string'),
                'pmcid': datasets.Value('int32'),
                'gene1': datasets.Value('string'),
                'gene2': datasets.Value('string'),
                'variant1': datasets.Value('string'),
                'variant2': datasets.Value('string'),
                'label': datasets.ClassLabel(names=[0,1])
            }
        )
        return datasets.DatasetInfo(
            # Shown on the dataset hub page.
            description=_DESCRIPTION,
            features=feature_spec,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            # Declares the default binary text-classification task mapping.
            task_templates=[
                datasets.tasks.TextClassification(
                    text_column='sentence', label_column='label'
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the three CSV files and pair each with its datasets.Split."""
        downloaded = dl_manager.download_and_extract(_URLS)
        # _URLS key -> canonical split name ("dev" is the validation split).
        split_of = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": downloaded[url_key]},
            )
            for url_key, split_name in split_of.items()
        ]

    def _generate_examples(self, filepath):
        """Yield (index, row-dict) pairs, one per CSV record in *filepath*."""
        with open(filepath, encoding="utf-8") as handle:
            for index, record in enumerate(csv.DictReader(handle)):
                yield index, record