MrLight committed on
Commit
131fc9c
·
verified ·
1 Parent(s): 335a816

Delete scifact.py

Browse files
Files changed (1) hide show
  1. scifact.py +0 -110
scifact.py DELETED
@@ -1,110 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """SciFact Dataset (Retrieval Only)"""
18
-
19
- import json
20
-
21
- import datasets
22
-
23
- _CITATION = """
24
- @inproceedings{Wadden2020FactOF,
25
- title={Fact or Fiction: Verifying Scientific Claims},
26
- author={David Wadden and Shanchuan Lin and Kyle Lo and Lucy Lu Wang and Madeleine van Zuylen and Arman Cohan and Hannaneh Hajishirzi},
27
- booktitle={EMNLP},
28
- year={2020},
29
- }
30
- """
31
-
32
- _DESCRIPTION = "dataset load script for SciFact"
33
-
34
- _DATASET_URLS = {
35
- 'train': "https://huggingface.co/datasets/Tevatron/scifact/resolve/main/train.jsonl.gz",
36
- 'dev': "https://huggingface.co/datasets/Tevatron/scifact/resolve/main/dev.jsonl.gz",
37
- 'test': "https://huggingface.co/datasets/Tevatron/scifact/resolve/main/test.jsonl.gz",
38
- }
39
-
40
class Scifact(datasets.GeneratorBasedBuilder):
    """Dataset builder for SciFact (retrieval-only): train/dev/test JSONL splits."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=VERSION,
            description="SciFact train/dev/test datasets",
        ),
    ]

    def _info(self):
        """Describe the example schema: a query plus positive/negative passage lists."""
        features = datasets.Features({
            'query_id': datasets.Value('string'),
            'query': datasets.Value('string'),
            # Each passage record carries a document id, its text, and a title.
            'positive_passages': [{
                'docid': datasets.Value('string'),
                'text': datasets.Value('string'),
                'title': datasets.Value('string'),
            }],
            'negative_passages': [{
                'docid': datasets.Value('string'),
                'text': datasets.Value('string'),
                'title': datasets.Value('string'),
            }],
        })

        return datasets.DatasetInfo(
            # Shown on the dataset page.
            description=_DESCRIPTION,
            # Column definitions declared above.
            features=features,
            supervised_keys=None,
            # No homepage/license recorded for this script.
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download every split file and emit one SplitGenerator per split."""
        files = dl_manager.download_and_extract(_DATASET_URLS)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": files[split]},
            )
            for split in ("train", "dev", "test")
        ]

    def _generate_examples(self, filepath):
        """Yield (query_id, example) pairs from one decompressed JSONL file."""
        with open(filepath, encoding="utf-8") as handle:
            for raw_line in handle:
                example = json.loads(raw_line)
                # Some records omit one of the passage lists; normalize
                # a missing/None value to an empty list so the schema holds.
                for field in ('positive_passages', 'negative_passages'):
                    if example.get(field) is None:
                        example[field] = []
                yield example['query_id'], example