hollyyfc committed on
Commit
b638057
·
verified ·
1 Parent(s): 8d41bca

Upload tidytuesday_script.py

Browse files
Files changed (1) hide show
  1. tidytuesday_script.py +122 -0
tidytuesday_script.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import csv
17
+ import json
18
+ import os
19
+ from typing import List
20
+ import datasets
21
+ import logging
22
+
23
+
24
# Citation for this dataset compilation (BibTeX). Find for instance the
# citation on arXiv or on the dataset repo/website.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {TidyTuesday for Python},
author={Holly Cui
},
year={2024}
}
"""

# Short human-readable summary rendered on the dataset card.
_DESCRIPTION = """\
This dataset compiles TidyTuesday datasets from 2023-2024, aiming to make resources in the R community more accessible for Python users.
"""

# Homepage and license are intentionally left empty in this script.
_HOMEPAGE = ""

_LICENSE = ""

# Raw-GitHub URLs of the pre-split JSON files; keys are the split names
# consumed by TidyTuesdayPython._split_generators.
_URLS = {
    "train": "https://raw.githubusercontent.com/hollyyfc/tidytuesday-for-python/main/tidytuesday_json_train.json",
    "validation": "https://raw.githubusercontent.com/hollyyfc/tidytuesday-for-python/main/tidytuesday_json_val.json",
}
50
+
51
+
52
class TidyTuesdayPython(datasets.GeneratorBasedBuilder):
    """Dataset builder for the TidyTuesday-for-Python compilation (2023-2024).

    Downloads two pre-split JSON files (train/validation) and yields one
    example per TidyTuesday project record.
    """

    # Split-name -> URL mapping, taken from the module-level constant.
    _URLS = _URLS
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the dataset metadata: feature schema, description, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "date_posted": datasets.Value("string"),
                    "project_name": datasets.Value("string"),
                    "project_source": datasets.features.Sequence(datasets.Value("string")),
                    "description": datasets.Value("string"),
                    "data_source_url": datasets.Value("string"),
                    # Per-variable documentation for each project's data files.
                    "data_dictionary": datasets.features.Sequence(
                        {
                            "variable": datasets.Value("string"),
                            "class": datasets.Value("string"),
                            "description": datasets.Value("string"),
                        }
                    ),
                    # Browsable file listing (name + GitHub URL).
                    "data": datasets.features.Sequence(
                        {
                            "file_name": datasets.Value("string"),
                            "file_url": datasets.Value("string"),
                        }
                    ),
                    # Directly loadable raw-file URLs (name + raw URL).
                    "data_load": datasets.features.Sequence(
                        {
                            "file_name": datasets.Value("string"),
                            "load_url": datasets.Value("string"),
                        }
                    ),
                }
            ),
            # Not a supervised task, so there is no (input, label) pair to declare.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download both split files and declare the TRAIN/VALIDATION splits."""
        downloaded_files = dl_manager.download_and_extract(self._URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in
    # `_split_generators`.
    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from one split's JSON file.

        Args:
            filepath: Local path to the downloaded JSON file; expected to
                contain a JSON array of record objects matching the schema
                declared in `_info` — TODO confirm against the hosted files.

        Yields:
            (int, dict): a unique integer key and the raw record.
        """
        logging.info("generating examples from = %s", filepath)
        with open(filepath, "r", encoding="utf-8") as f:
            # Bug fix: json.load() was called with no argument, which raises
            # TypeError; the open file handle must be passed.
            records = json.load(f)
        for idx, record in enumerate(records):
            # Bug fix: GeneratorBasedBuilder requires `yield key, example`
            # pairs, not bare records; use the list index as a unique key.
            yield idx, record