Datasets:

ArXiv:
License:
ghjuliasialelli committed on
Commit
45683f7
·
verified ·
1 Parent(s): 820eb00

Delete AGBD.py

Browse files
Files changed (1) hide show
  1. AGBD.py +0 -471
AGBD.py DELETED
@@ -1,471 +0,0 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- ############################################################################################################################
16
- # IMPORTS
17
-
18
- import numpy as np
19
- import datasets
20
- from datasets import Value
21
- import pickle
22
- import pandas as pd
23
-
24
- ############################################################################################################################
25
- # GLOBAL VARIABLES
26
-
27
# BibTeX citation for the AGBD paper (arXiv:2406.04928)
_CITATION = """\
@misc{https://doi.org/10.48550/arxiv.2406.04928,
doi = {10.48550/ARXIV.2406.04928},
url = {https://arxiv.org/abs/2406.04928},
author = {Sialelli, Ghjulia and Peters, Torben and Wegner, Jan D. and Schindler, Konrad},
keywords = {Computer Vision and Pattern Recognition (cs.CV), Machine Learning (cs.LG), Image and Video Processing (eess.IV), FOS: Computer and information sciences, FOS: Computer and information sciences, FOS: Electrical engineering, electronic engineering, information engineering, FOS: Electrical engineering, electronic engineering, information engineering},
title = {AGBD: A Global-scale Biomass Dataset},
publisher = {arXiv},
year = {2024},
copyright = {Creative Commons Attribution Non Commercial Share Alike 4.0 International}
}
"""

# Description of the dataset
_DESCRIPTION = """\
This new dataset is a machine-learning ready dataset of high-resolution (10m), multi-modal satellite imagery, paired with AGB reference values from NASA’s Global Ecosystem Dynamics Investigation (GEDI) mission.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# License of the dataset
_LICENSE = "https://creativecommons.org/licenses/by-nc/4.0/"

# Datatypes of the optional per-sample metadata features that callers may
# request via `additional_features`.
# NOTE(review): several key names look truncated to 10 characters
# (e.g. 'elev_lowes', 'region_cla') — presumably inherited from the upstream
# data schema; confirm before renaming.
feature_dtype = {'s2_num_days': Value('int16'),
                 'gedi_num_days': Value('uint16'),
                 'lat': Value('float32'),
                 'lon': Value('float32'),
                 "agbd_se": Value('float32'),
                 "elev_lowes": Value('float32'),
                 "leaf_off_f": Value('uint8'),
                 "pft_class": Value('uint8'),
                 "region_cla": Value('uint8'),
                 "rh98": Value('float32'),
                 "sensitivity": Value('float32'),
                 "solar_elev": Value('float32'),
                 "urban_prop":Value('uint8')}

# Default input features configuration: which modalities are stacked into the
# model input (all 12 Sentinel-2 bands, lat/lon encodings, ALOS, canopy
# height, land cover and DEM; acquisition dates and topography are off).
default_input_features = {'S2_bands': ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09','B11', 'B12'],
                          'S2_dates' : False, 'lat_lon': True, 'GEDI_dates': False, 'ALOS': True, 'CH': True, 'LC': True,
                          'DEM': True, 'topo': False}

# Mapping from Sentinel-2 band name to its channel index in the patch data
s2_bands_idx = {'B01': 0, 'B02': 1, 'B03': 2, 'B04': 3, 'B05': 4, 'B06': 5, 'B07': 6, 'B08': 7, 'B8A': 8, 'B09': 9, 'B11': 10, 'B12': 11}

# Normalization statistics (mean/std, min/max, 1st/99th percentiles) per data
# source. NOTE(review): presumably computed over the training split — confirm.
norm_values = {
    'ALOS_bands': {
        'HH': {'mean': -10.381429, 'std': 8.561741, 'min': -83.0, 'max': 13.329468, 'p1': -83.0, 'p99': -2.1084213},
        'HV': {'mean': -16.722847, 'std': 8.718428, 'min': -83.0, 'max': 11.688309, 'p1': -83.0, 'p99': -7.563843}},
    'S2_bands':
        {'B01': {'mean': 0.12478869, 'std': 0.024433358, 'min': 1e-04, 'max': 1.8808, 'p1': 0.0787, 'p99': 0.1944},
         'B02': {'mean': 0.13480005, 'std': 0.02822557, 'min': 1e-04, 'max': 2.1776, 'p1': 0.0925, 'p99': 0.2214},
         'B03': {'mean': 0.16031432, 'std': 0.032037303, 'min': 1e-04, 'max': 2.12, 'p1': 0.1035, 'p99': 0.2556},
         'B04': {'mean': 0.1532097, 'std': 0.038628064, 'min': 1e-04, 'max': 2.0032, 'p1': 0.1023, 'p99': 0.2816},
         'B05': {'mean': 0.20312776, 'std': 0.04205057, 'min': 0.0422, 'max': 1.7502, 'p1': 0.1178, 'p99': 0.3189},
         'B06': {'mean': 0.32636437, 'std': 0.07139242, 'min': 0.0502, 'max': 1.7245, 'p1': 0.1632, 'p99': 0.519},
         'B07': {'mean': 0.36605212, 'std': 0.08555025, 'min': 0.0616, 'max': 1.7149, 'p1': 0.1775, 'p99': 0.6075},
         'B08': {'mean': 0.3811653, 'std': 0.092815965, 'min': 1e-04, 'max': 1.7488, 'p1': 0.1691, 'p99': 0.646},
         'B8A': {'mean': 0.3910436, 'std': 0.0896364, 'min': 0.055, 'max': 1.688, 'p1': 0.187, 'p99': 0.6385},
         'B09': {'mean': 0.3910644, 'std': 0.0836445, 'min': 0.0012, 'max': 1.7915, 'p1': 0.2123, 'p99': 0.6238},
         'B11': {'mean': 0.2917373, 'std': 0.07472579, 'min': 0.0953, 'max': 1.648, 'p1': 0.1334, 'p99': 0.4827},
         'B12': {'mean': 0.21169408, 'std': 0.05880649, 'min': 0.0975, 'max': 1.6775, 'p1': 0.1149, 'p99': 0.3869}},
    'CH': {
        'ch': {'mean': 9.736144, 'std': 9.493601, 'min': 0.0, 'max': 61.0, 'p1': 0.0, 'p99': 38.0},
        'std': {'mean': 7.9882116, 'std': 4.549494, 'min': 0.0, 'max': 254.0, 'p1': 0.0, 'p99': 18.0}},
    'DEM': {
        'mean': 604.63727, 'std': 588.02094, 'min': -82.0, 'max': 5205.0, 'p1': 4.0, 'p99': 2297.0},
    'Sentinel_metadata': {
        'S2_vegetation_score': {'mean': 89.168724, 'std': 17.17321, 'min': 20.0, 'max': 100.0, 'p1': 29.0, 'p99': 100.0},
        'S2_date': {'mean': 299.1638, 'std': 192.87402, 'min': -165.0, 'max': 623.0, 'p1': -105.0, 'p99': 602.0}},
    'GEDI': {
        'agbd': {'mean': 66.97266, 'std': 98.66588, 'min': 0.0, 'max': 499.99985, 'p1': 0.0, 'p99': 429.7605},
        'agbd_se': {'mean': 8.360701, 'std': 4.211524, 'min': 2.981795, 'max': 25.041483, 'p1': 2.9819136, 'p99': 17.13577},
        'rh98': {'mean': 12.074685, 'std': 10.276359, 'min': -1.1200076, 'max': 111.990005, 'p1': 2.3599916, 'p99': 41.96},
        'date': {'mean': 361.7431, 'std': 175.37294, 'min': 0.0, 'max': 624.0, 'p1': 5.0, 'p99': 619.0}}
}

# Define the nodata values for each data source
NODATAVALS = {'S2_bands' : 0, 'CH': 255, 'ALOS_bands': -9999.0, 'DEM': -9999, 'LC': 255}

# Reference biomes (land-cover class value -> name), and derived metrics
REF_BIOMES = {20: 'Shrubs', 30: 'Herbaceous vegetation', 40: 'Cultivated', 90: 'Herbaceous wetland', 111: 'Closed-ENL', 112: 'Closed-EBL', 114: 'Closed-DBL', 115: 'Closed-mixed', 116: 'Closed-other', 121: 'Open-ENL', 122: 'Open-EBL', 124: 'Open-DBL', 125: 'Open-mixed', 126: 'Open-other'}
# Biome class value -> position in the one-hot encoding
_biome_values_mapping = {v: i for i, v in enumerate(REF_BIOMES.keys())}
# Biome class values, in encoding order
_ref_biome_values = [v for v in REF_BIOMES.keys()]
115
-
116
- ############################################################################################################################
117
- # Helper functions
118
-
119
def normalize_data(data, norm_values, norm_strat, nodata_value = None) :
    """
    Normalize `data` according to one of three strategies:
    - mean_std: subtract the mean and divide by the standard deviation
    - pct: subtract the 1st percentile, divide by the (p99 - p1) range,
      then clip the result to [0, 1]
    - min_max: subtract the minimum and divide by the (max - min) range

    Pixels equal to `nodata_value` (if given) are set to 0 instead.

    Args:
    - data (np.array): the data to normalize
    - norm_values (dict): the normalization values
    - norm_strat (str): the normalization strategy
    - nodata_value (int/float): the nodata value, or None

    Returns:
    - the normalized data (np.array)
    """

    # Compute the scaled values first, then mask out nodata pixels.
    if norm_strat == 'mean_std' :
        center, spread = norm_values['mean'], norm_values['std']
        scaled = (data - center) / spread
    elif norm_strat == 'pct' :
        low, high = norm_values['p1'], norm_values['p99']
        scaled = (data - low) / (high - low)
    elif norm_strat == 'min_max' :
        low, high = norm_values['min'], norm_values['max']
        scaled = (data - low) / (high - low)
    else :
        raise ValueError(f'Normalization strategy `{norm_strat}` is not valid.')

    if nodata_value is not None :
        scaled = np.where(data == nodata_value, 0, scaled)

    # Only the percentile strategy clips to [0, 1] (matches the GEDI/S2 pipeline).
    if norm_strat == 'pct' :
        scaled = np.clip(scaled, 0, 1)

    return scaled
160
-
161
-
162
def normalize_bands(bands_data, norm_values, order, norm_strat, nodata_value = None) :
    """
    Normalize each band of a channels-first stack, in place.

    Args:
    - bands_data (np.array): the bands data to normalize, shaped (bands, H, W)
    - norm_values (dict): per-band normalization values, keyed by band name
    - order (list): the band names, in the order they appear in `bands_data`
    - norm_strat (str): the normalization strategy
    - nodata_value (int/float): the nodata value

    Returns:
    - bands_data (np.array): the normalized bands data (the same array, mutated)
    """

    for channel, band_name in enumerate(order) :
        stats = norm_values[band_name]
        bands_data[channel, :, :] = normalize_data(bands_data[channel, :, :], stats, norm_strat, nodata_value)

    return bands_data
182
-
183
-
184
def one_hot(x) :
    """Return a one-hot vector for biome class `x`; unknown classes map to index 0."""
    encoded = np.zeros(len(_biome_values_mapping))
    slot = _biome_values_mapping.get(x, 0)
    encoded[slot] = 1
    return encoded
188
-
189
def encode_biome(lc, encode_strat, embeddings = None) :
    """
    Encode the land cover classes using one of three strategies:
    1) sin/cos encoding, 2) cat2vec embeddings, 3) one-hot encoding.
    Nodata pixels are zeroed in the sin/cos case.

    Args:
    - lc (np.array): the land cover data
    - encode_strat (str): the encoding strategy
    - embeddings (dict): the cat2vec embeddings (required for 'cat2vec')

    Returns:
    - the encoded land cover data (np.array, float32), with the encoding
      dimension appended as the last axis
    """

    if encode_strat == 'sin_cos' :
        # Map the LC class values onto the unit circle (201 is the largest
        # class value + 1 in the LC nomenclature — TODO confirm) and rescale
        # both channels to [0, 1]; nodata pixels become 0.
        angle = 2 * np.pi * lc / 201
        cos_channel = np.where(lc == NODATAVALS['LC'], 0, (np.cos(angle) + 1) / 2)
        sin_channel = np.where(lc == NODATAVALS['LC'], 0, (np.sin(angle) + 1) / 2)
        return np.stack([cos_channel, sin_channel], axis = -1).astype(np.float32)

    if encode_strat == 'cat2vec' :
        # Look up each class in the cat2vec table, falling back to class 0.
        lookup = np.vectorize(lambda v: embeddings.get(v, embeddings.get(0)), signature = '()->(n)')
        return lookup(lc).astype(np.float32)

    if encode_strat == 'onehot' :
        # One binary channel per reference biome class.
        return np.vectorize(one_hot, signature = '() -> (n)')(lc).astype(np.float32)

    raise ValueError(f'Encoding strategy `{encode_strat}` is not valid.')
219
-
220
-
221
def compute_num_features(input_features, encode_strat) :
    """
    Compute the number of channels the assembled input patch will have.

    This must stay in sync with `concatenate_features`, which writes exactly
    this many channels.

    Args:
    - input_features (dict): the input features configuration
    - encode_strat (str): the encoding strategy

    Returns:
    - num_features (int): the number of features
    """

    # Sentinel-2 bands are always included
    num_features = len(input_features['S2_bands'])
    if input_features['S2_dates'] : num_features += 3   # s2_num_days, s2_doy_cos, s2_doy_sin
    if input_features['lat_lon'] : num_features += 4    # lat_cos, lat_sin, lon_cos, lon_sin
    if input_features['GEDI_dates'] : num_features += 3 # gedi_num_days, gedi_doy_cos, gedi_doy_sin
    if input_features['ALOS'] : num_features += 2       # HH, HV
    if input_features['CH'] : num_features += 2         # ch, std
    if input_features['LC'] :
        num_features += 1  # the lc_prob channel, always written when LC is on
        # Channels taken by the land-cover encoding itself
        if encode_strat == 'sin_cos' : num_features += 2
        elif encode_strat == 'cat2vec' : num_features += 5
        elif encode_strat == 'onehot' : num_features += len(REF_BIOMES)
        # Bug fix: with encode_strat 'none', `concatenate_features` still
        # writes the raw LC class as one channel; the original code did not
        # budget for it, under-allocating the output and raising IndexError.
        elif encode_strat == 'none' : num_features += 1
    if input_features['DEM'] : num_features += 1
    if input_features['topo'] : num_features += 3       # slope, aspect_cos, aspect_sin

    return num_features
248
-
249
-
250
def concatenate_features(patch, lc_patch, input_features, encode_strat) :
    """
    Assemble the requested input modalities into a single channels-first stack.

    The channel order is: S2 bands, S2 dates, lat/lon, GEDI dates, ALOS, CH,
    LC encoding + lc_prob, topography, DEM — each included only if requested.

    Args:
    - patch (np.array): the full patch data, channels first
    - lc_patch (np.array): the (possibly encoded) land cover data
    - input_features (dict): the input features configuration
    - encode_strat (str): the encoding strategy

    Returns:
    - out_patch (np.array): the concatenated features (float32)
    """

    # Allocate the output stack up front, sized to match what we will write.
    num_features = compute_num_features(input_features, encode_strat)
    out_patch = np.zeros((num_features, patch.shape[1], patch.shape[2]), dtype = np.float32)

    cursor = 0

    def _write(channels, width) :
        # Copy `width` channels into the stack and advance the cursor.
        nonlocal cursor
        out_patch[cursor : cursor + width] = channels
        cursor += width

    # Sentinel-2 bands (always present), in the user-requested order
    s2_indices = [s2_bands_idx[band] for band in input_features['S2_bands']]
    _write(patch[s2_indices], len(s2_indices))

    # Sentinel-2 acquisition dates
    if input_features['S2_dates'] :
        _write(patch[12:15], 3)

    # Latitude / longitude encodings
    if input_features['lat_lon'] :
        _write(patch[15:19], 4)

    # GEDI acquisition dates
    if input_features['GEDI_dates'] :
        _write(patch[19:22], 3)

    # ALOS bands (HH, HV)
    if input_features['ALOS'] :
        _write(patch[22:24], 2)

    # Canopy height and its standard deviation
    if input_features['CH'] :
        _write(patch[24:26], 2)

    # Land cover: the encoded classes, then the class probability
    if input_features['LC'] :
        encoding_width = {'sin_cos': 2, 'cat2vec': 5, 'onehot': len(REF_BIOMES), 'none': 1}
        if encode_strat in encoding_width :
            _write(lc_patch, encoding_width[encode_strat])
        _write(patch[27], 1)

    # Topographic data (slope, aspect_cos, aspect_sin)
    if input_features['topo'] :
        _write(patch[28:31], 3)

    # Digital elevation model
    if input_features['DEM'] :
        _write(patch[31], 1)

    return out_patch
334
-
335
- #########################################################################################################################
336
- # DATASET CLASS DEFINITION
337
-
338
class NewDataset(datasets.GeneratorBasedBuilder):
    """DatasetBuilder for the AGBD dataset.

    Wraps the raw `prs-eth/AGBD_raw` dataset: normalizes the satellite bands,
    encodes the land cover classes, concatenates the user-requested input
    features and center-crops each patch to `patch_size`.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION, description="Normalized data"),
        datasets.BuilderConfig(name="unnormalized", version=VERSION, description="Unnormalized data"),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def __init__(self, *args, input_features = default_input_features, additional_features = None, norm_strat = 'pct',
                 encode_strat = 'sin_cos', patch_size = 15, **kwargs):
        """
        Args:
        - input_features (dict): which input modalities to include (see `default_input_features`)
        - additional_features (list): metadata fields (keys of `feature_dtype`) to expose per sample;
          defaults to none
        - norm_strat (str): normalization strategy, one of 'mean_std', 'pct', 'none'
        - encode_strat (str): land-cover encoding strategy, one of 'sin_cos', 'cat2vec', 'onehot', 'none'
        - patch_size (int): spatial size of the center crop
        """

        self.inner_dataset_kwargs = kwargs
        self._is_streaming = False
        self.patch_size = patch_size

        assert norm_strat in ['mean_std', 'pct', 'none'], f'Normalization strategy `{norm_strat}` is not valid.'
        self.norm_strat = norm_strat

        assert encode_strat in ['sin_cos', 'cat2vec', 'onehot', 'none'], f'Encoding strategy `{encode_strat}` is not valid.'
        self.encode_strat = encode_strat

        self.input_features = input_features
        # Bug fix: the original signature used a mutable default argument
        # (`additional_features = []`), which is shared across all instances.
        self.additional_features = [] if additional_features is None else additional_features

        if self.encode_strat == 'cat2vec' :
            # Load the 5-dimensional cat2vec embedding of each LC class.
            # NOTE(review): read from the current working directory — confirm
            # the CSV ships alongside this script.
            embeddings = pd.read_csv("embeddings_train.csv")
            self.embeddings = {v: np.array([a, b, c, d, e])
                               for v, a, b, c, d, e in zip(embeddings.mapping, embeddings.dim0, embeddings.dim1,
                                                           embeddings.dim2, embeddings.dim3, embeddings.dim4)}
        else:
            self.embeddings = None

        super().__init__(*args, **kwargs)

    def as_streaming_dataset(self, split=None, base_path=None):
        # Remember that streaming was requested so that _split_generators can
        # forward it to the inner `load_dataset` call.
        self._is_streaming = True
        return super().as_streaming_dataset(split=split, base_path=base_path)

    def _info(self):
        """Declare the features: a 3-D float32 input, a scalar label, plus any requested metadata."""

        all_features = {
            'input': datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value('float32')))),
            'label': Value('float32')
        }
        for feat in self.additional_features:
            all_features[feat] = feature_dtype[feat]
        features = datasets.Features(all_features)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The raw data lives in a separate HF dataset; this builder only
        # post-processes it, so there is nothing to download here.
        self.original_dataset = datasets.load_dataset("prs-eth/AGBD_raw", streaming=self._is_streaming)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"split": "validation"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split": "test"}),
        ]

    def _generate_examples(self, split):
        """Yield (key, example) pairs: normalized, encoded, cropped inputs plus the AGB label."""
        for i, d in enumerate(self.original_dataset[split]):

            patch = np.asarray(d["input"])

            # ------------------------------------------------------------------------------------------------
            # Process the data that needs to be processed

            # Structure of the d["input"] data:
            # - 12 x Sentinel-2 bands
            # - 3 x S2 dates bands (s2_num_days, s2_doy_cos, s2_doy_sin)
            # - 4 x lat/lon (lat_cos, lat_sin, lon_cos, lon_sin)
            # - 3 x GEDI dates bands (gedi_num_days, gedi_doy_cos, gedi_doy_sin)
            # - 2 x ALOS bands (HH, HV)
            # - 2 x CH bands (ch, std)
            # - 2 x LC bands (lc encoding, lc_prob)
            # - 4 x DEM bands (slope, aspect_cos, aspect_sin, dem)

            if self.norm_strat != 'none' :

                # Normalize S2 bands
                patch[:12] = normalize_bands(patch[:12], norm_values['S2_bands'], ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09','B11', 'B12'], self.norm_strat, NODATAVALS['S2_bands'])

                # Normalize s2_num_days; no percentile stats exist for dates,
                # so 'pct' falls back to 'min_max'
                patch[12] = normalize_data(patch[12], norm_values['Sentinel_metadata']['S2_date'], 'min_max' if self.norm_strat == 'pct' else self.norm_strat)

                # Normalize gedi_num_days (same fallback as above)
                patch[19] = normalize_data(patch[19], norm_values['GEDI']['date'], 'min_max' if self.norm_strat == 'pct' else self.norm_strat)

                # Normalize ALOS bands
                patch[22:24] = normalize_bands(patch[22:24], norm_values['ALOS_bands'], ['HH', 'HV'], self.norm_strat, NODATAVALS['ALOS_bands'])

                # Normalize CH bands
                patch[24] = normalize_data(patch[24], norm_values['CH']['ch'], self.norm_strat, NODATAVALS['CH'])
                patch[25] = normalize_data(patch[25], norm_values['CH']['std'], self.norm_strat, NODATAVALS['CH'])

                # Normalize DEM bands
                patch[31] = normalize_data(patch[31], norm_values['DEM'], self.norm_strat, NODATAVALS['DEM'])

            # Encode LC data
            if self.encode_strat != 'none' : lc_patch = encode_biome(patch[26], self.encode_strat, self.embeddings).swapaxes(-1,0)
            else: lc_patch = patch[26]

            # Put lc_prob in [0,1] range (assumes `patch` has a float dtype,
            # otherwise this division would truncate — TODO confirm)
            patch[27] = patch[27] / 100

            # ------------------------------------------------------------------------------------------------
            # Concatenate the features that the user requested

            out_patch = concatenate_features(patch, lc_patch, self.input_features, self.encode_strat)

            # ------------------------------------------------------------------------------------------------

            # Center-crop to the requested patch size
            start_x = (patch.shape[1] - self.patch_size) // 2
            start_y = (patch.shape[2] - self.patch_size) // 2
            out_patch = out_patch[:, start_x : start_x + self.patch_size, start_y : start_y + self.patch_size]

            # ------------------------------------------------------------------------------------------------

            # Create the data dictionary
            data = {'input': out_patch, 'label': d["label"]}

            # Add the additional features
            for feat in self.additional_features:
                data[feat] = d["metadata"][feat]

            yield i, data
471
-