SaulLu committed on
Commit
7d10c75
·
1 Parent(s): 0d6b8f2

add loading script

Browse files
Files changed (1) hide show
  1. RenderedSST2.py +121 -0
RenderedSST2.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
"""Loading script for the Rendered SST2 image-classification dataset."""
16
+
17
+
18
+ import csv
19
+ import json
20
+ import os
21
+ from pathlib import Path
22
+
23
+ import datasets
24
+
25
+ _CITATION = """\
26
+ @article{DBLP:journals/corr/abs-2103-00020,
27
+ author = {Alec Radford and
28
+ Jong Wook Kim and
29
+ Chris Hallacy and
30
+ Aditya Ramesh and
31
+ Gabriel Goh and
32
+ Sandhini Agarwal and
33
+ Girish Sastry and
34
+ Amanda Askell and
35
+ Pamela Mishkin and
36
+ Jack Clark and
37
+ Gretchen Krueger and
38
+ Ilya Sutskever},
39
+ title = {Learning Transferable Visual Models From Natural Language Supervision},
40
+ journal = {CoRR},
41
+ volume = {abs/2103.00020},
42
+ year = {2021},
43
+ url = {https://arxiv.org/abs/2103.00020},
44
+ eprinttype = {arXiv},
45
+ eprint = {2103.00020},
46
+ timestamp = {Thu, 04 Mar 2021 17:00:40 +0100},
47
+ biburl = {https://dblp.org/rec/journals/corr/abs-2103-00020.bib},
48
+ bibsource = {dblp computer science bibliography, https://dblp.org}
49
+ }
50
+ """
51
+
52
# Description adapted from the dataset card linked in _HOMEPAGE
# (https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md).
_DESCRIPTION = """\
Rendered SST2 is an image classification dataset introduced with CLIP to
evaluate a model's optical character recognition capability. It consists of
sentences from the Stanford Sentiment Treebank v2 (SST-2) rendered as images,
each labelled as expressing positive or negative sentiment.
"""
57
+
58
+ _HOMEPAGE = "https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md"
59
+
60
+ # TODO: Add the licence for the dataset here if you can find it
61
+ _LICENSE = ""
62
+
63
+ _URL = "https://openaipublic.azureedge.net/clip/data/rendered-sst2.tgz"
64
+
65
+ _NAMES = ["negative", "positive"]
66
+
67
+
68
class SST2Dataset(datasets.GeneratorBasedBuilder):
    """Rendered SST2: SST-2 sentences rendered as images, labelled by sentiment.

    The downloaded archive contains one folder per split (``train``,
    ``valid``, ``test``), each with a ``negative`` and a ``positive``
    subfolder of rendered sentence images.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: an image feature and a binary class label."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                # Label order follows _NAMES: 0 = "negative", 1 = "positive".
                "label": datasets.ClassLabel(names=_NAMES),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and map its folders to dataset splits."""
        data_dir = dl_manager.download_and_extract(_URL)
        data_dir = Path(data_dir) / "rendered-sst2"
        # The archive names the validation folder "valid", not "validation".
        split_folders = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "valid"),
            (datasets.Split.TEST, "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"split_dir": data_dir / folder},
            )
            for split, folder in split_folders
        ]

    def _generate_examples(self, split_dir):
        """Yield ``(index, example)`` pairs for every image under *split_dir*.

        Args:
            split_dir: Path to one split folder containing a subfolder per
                class name in ``_NAMES``.
        """
        index = -1
        for label in _NAMES:
            # BUG FIX: the original yielded label "negative" for the
            # "positive" folder as well; the label now matches the folder.
            # sorted() makes the example order filesystem-independent.
            for image_path in sorted((split_dir / label).iterdir()):
                index += 1
                yield index, {"label": label, "image": str(image_path)}