cathv committed on
Commit
b2f2220
·
verified ·
1 Parent(s): 41fdf1c

Delete loading script

Browse files
Files changed (1) hide show
  1. test.py +0 -276
test.py DELETED
@@ -1,276 +0,0 @@
1
- import pandas as pd
2
- import datasets
3
- import numpy as np
4
- import ast
5
- from PIL import Image
6
-
7
- class InvasivePlantsConfig(datasets.BuilderConfig):
8
-
9
- def __init__(self, **kwargs):
10
- super(InvasivePlantsConfig, self).__init__(**kwargs)
11
-
12
class InvasivePlantsDataset(datasets.GeneratorBasedBuilder):
    """Loading script for the BATIS benchmark dataset.

    The four hotspot configs (Kenya, South_Africa, USA_Summer, USA_Winter)
    share one feature schema: per-hotspot coordinates, relative paths to the
    satellite imagery / environmental rasters / target files, 19 bioclimatic
    variables, a checklist count, and the split label. The Species_ID config
    exposes the per-country species lists instead.
    """

    # The per-country hotspot subsets; all four share the schema built by
    # _hotspot_features() and the train/valid/test CSV layout.
    _HOTSPOT_CONFIGS = ("Kenya", "South_Africa", "USA_Summer", "USA_Winter")

    BUILDER_CONFIGS = [
        InvasivePlantsConfig(
            name="Kenya",
            version=datasets.Version("1.0.0"),
            description="Subset of the Dataset containing Hotspots from Kenya"
        ),
        InvasivePlantsConfig(
            name="South_Africa",
            version=datasets.Version("1.0.0"),
            description="Subset of the Dataset containing Hotspots from South Africa"
        ),
        InvasivePlantsConfig(
            name="USA_Summer",
            version=datasets.Version("1.0.0"),
            description="Subset of the Dataset containing Hotspots from the United States (Summer Season)"
        ),
        InvasivePlantsConfig(
            name="USA_Winter",
            version=datasets.Version("1.0.0"),
            description="Subset of the Dataset containing Hotspots from the United States (Winter Season) "
        ),
        InvasivePlantsConfig(
            name="Species_ID",
            version=datasets.Version("1.0.0"),
            description="Subset containing the list of species for each country"
        )
    ]

    DEFAULT_CONFIG_NAME = "Species_ID"

    @staticmethod
    def _hotspot_features():
        """Build the feature schema shared by every hotspot (country) config."""
        features = {
            "hotspot_id": datasets.Value("string"),
            "lon": datasets.Value("float32"),
            "lat": datasets.Value("float32"),
            "sat_imagery_path": datasets.Value("string"),
            "environmental_path": datasets.Value("string"),
            "target_path": datasets.Value("string"),
        }
        # bio_1 .. bio_19: the 19 bioclimatic variables present in the CSVs.
        features.update({f"bio_{i}": datasets.Value("float32") for i in range(1, 20)})
        features["num_complete_checklists"] = datasets.Value("int32")
        features["split"] = datasets.Value("string")
        return datasets.Features(features)

    def _info(self):
        """Return DatasetInfo with the schema for the active config.

        Raises:
            ValueError: if the config name is not one of the declared configs.
        """
        if self.config.name in self._HOTSPOT_CONFIGS:
            # All four country subsets previously repeated an identical
            # 28-field dict; the shared schema is now built in one place.
            features = self._hotspot_features()
        elif self.config.name == "Species_ID":
            features = datasets.Features({
                "scientific_name": datasets.Value("string"),
                "ebird_code": datasets.Value("string"),
                "inat_preview": datasets.Value("string"),
            })
        else:
            raise ValueError(f"Unsupported config: {self.config.name}")

        return datasets.DatasetInfo(
            description="The BATIS Benchmark Dataset",
            features=features,
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/cathv/test",
            license="CC-BY-NC-4.0",
        )

    def _split_generators(self, dl_manager):
        """Declare the splits and the CSV each one reads.

        Hotspot configs yield train/val/test from ``<config>/<split>_filtered.csv``;
        Species_ID yields one named split per country list.

        Raises:
            ValueError: if the config name is not one of the declared configs.
        """
        if self.config.name in self._HOTSPOT_CONFIGS:
            prefix = self.config.name
            # BUGFIX: the original assigned test_filtered.csv to the "val"
            # split and valid_filtered.csv to the "test" split (swapped);
            # each split now reads its own file.
            return [
                datasets.SplitGenerator(
                    name="train", gen_kwargs={"filepath": f"{prefix}/train_filtered.csv"}
                ),
                datasets.SplitGenerator(
                    name="val", gen_kwargs={"filepath": f"{prefix}/valid_filtered.csv"}
                ),
                datasets.SplitGenerator(
                    name="test", gen_kwargs={"filepath": f"{prefix}/test_filtered.csv"}
                ),
            ]
        elif self.config.name == "Species_ID":
            return [
                datasets.SplitGenerator(
                    name="Kenya",
                    gen_kwargs={"filepath": "Species_ID/species_list_kenya.csv"},
                ),
                datasets.SplitGenerator(
                    name="South_Africa",
                    gen_kwargs={"filepath": "Species_ID/species_list_south_africa.csv"},
                ),
                datasets.SplitGenerator(
                    name="USA",
                    gen_kwargs={"filepath": "Species_ID/species_list_usa.csv"},
                ),
            ]
        raise ValueError(f"Unknown config: {self.config.name}")

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from the metadata CSV at *filepath*.

        For hotspot configs the imagery/environmental/target paths are derived
        from the hotspot_id (images/<id>.tif, environmental/<id>.npy,
        targets/<id>.json), matching the repository layout.
        """
        df_metadata = pd.read_csv(filepath)
        if self.config.name in self._HOTSPOT_CONFIGS:
            for idx in range(len(df_metadata)):
                row = df_metadata.iloc[idx]
                example = {
                    "hotspot_id": row['hotspot_id'],
                    "lon": row['lon'],
                    "lat": row['lat'],
                    'sat_imagery_path': f"images/{row['hotspot_id']}.tif",
                    'environmental_path': f"environmental/{row['hotspot_id']}.npy",
                    "target_path": f"targets/{row['hotspot_id']}.json",
                    "num_complete_checklists": row['num_complete_checklists'],
                    "split": row['split'],
                }
                # The 19 bioclimatic columns were previously copied out one by
                # one; generate them instead.
                example.update({f"bio_{i}": row[f"bio_{i}"] for i in range(1, 20)})
                yield idx, example
        elif self.config.name == "Species_ID":
            for idx in range(len(df_metadata)):
                row = df_metadata.iloc[idx]
                yield idx, {
                    "scientific_name": row['scientific_name'],
                    "ebird_code": row['ebird_code'],
                    "inat_preview": row['inat_preview'],
                }