Datasets:

ArXiv:
License:
ghjuliasialelli committed on
Commit
4bf4e92
·
verified ·
1 Parent(s): 77c8dcc

Update, V1.1.0

Browse files
Files changed (1) hide show
  1. AGBD.py +313 -184
AGBD.py CHANGED
@@ -11,38 +11,45 @@
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
- # TODO: Address all TODOs and remove all explanatory comments
15
- """TODO: Add a description here."""
 
16
 
17
  import numpy as np
18
  import datasets
19
  from datasets import Value
20
  import pickle
21
- # TODO: Add BibTeX citation
22
- # Find for instance the citation on arxiv or on the dataset repo/website
 
 
 
 
23
  _CITATION = """\
24
- @InProceedings{huggingface:dataset,
25
- title = {A great new dataset},
26
- author={huggingface, Inc.
27
- },
28
- year={2020}
 
 
 
 
29
  }
30
  """
31
 
32
- # TODO: Add description of the dataset here
33
- # You can copy an official description
34
  _DESCRIPTION = """\
35
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
36
  """
37
 
38
  # TODO: Add a link to an official homepage for the dataset here
39
  _HOMEPAGE = ""
40
 
41
- # TODO: Add the licence for the dataset here if you can find it
42
- _LICENSE = ""
43
-
44
-
45
 
 
46
  feature_dtype = {'s2_num_days': Value('int16'),
47
  'gedi_num_days': Value('uint16'),
48
  'lat': Value('float32'),
@@ -57,213 +64,305 @@ feature_dtype = {'s2_num_days': Value('int16'),
57
  "solar_elev": Value('float32'),
58
  "urban_prop":Value('uint8')}
59
 
60
- norm_values = {'ALOS_bands': {
61
- 'HH': {'mean': -10.381429, 'std': 8.561741, 'min': -83.0, 'max': 13.329468, 'p1': -19.542107, 'p99': -2.402588},
62
- 'HV': {'mean': -16.722847, 'std': 8.718428, 'min': -83.0, 'max': 11.688309, 'p1': -29.285168, 'p99': -8.773987}},
63
- 'S2_bands': {'B01': {'mean': 0.12478869, 'std': 0.024433358, 'min': 1e-04, 'max': 1.8808, 'p1': 0.0787,
64
- 'p99': 0.1946},
65
- 'B02': {'mean': 0.13480005, 'std': 0.02822557, 'min': 1e-04, 'max': 2.1776, 'p1': 0.0925,
66
- 'p99': 0.2216},
67
- 'B03': {'mean': 0.16031432, 'std': 0.032037303, 'min': 1e-04, 'max': 2.12, 'p1': 0.1035,
68
- 'p99': 0.2556},
69
- 'B04': {'mean': 0.1532097, 'std': 0.038628064, 'min': 1e-04, 'max': 2.0032, 'p1': 0.1023,
70
- 'p99': 0.2816},
71
- 'B05': {'mean': 0.20312776, 'std': 0.04205057, 'min': 0.0422, 'max': 1.7502, 'p1': 0.1178,
72
- 'p99': 0.319},
73
- 'B06': {'mean': 0.32636437, 'std': 0.07139242, 'min': 0.0502, 'max': 1.7245, 'p1': 0.1633,
74
- 'p99': 0.519},
75
- 'B07': {'mean': 0.36605212, 'std': 0.08555025, 'min': 0.0616, 'max': 1.7149, 'p1': 0.1776,
76
- 'p99': 0.6076},
77
- 'B08': {'mean': 0.3811653, 'std': 0.092815965, 'min': 1e-04, 'max': 1.7488, 'p1': 0.1691,
78
- 'p99': 0.646},
79
- 'B8A': {'mean': 0.3910436, 'std': 0.0896364, 'min': 0.055, 'max': 1.688, 'p1': 0.1871,
80
- 'p99': 0.6386},
81
- 'B09': {'mean': 0.3910644, 'std': 0.0836445, 'min': 0.0012, 'max': 1.7915, 'p1': 0.2124,
82
- 'p99': 0.6241},
83
- 'B11': {'mean': 0.2917373, 'std': 0.07472579, 'min': 0.0953, 'max': 1.648, 'p1': 0.1334,
84
- 'p99': 0.4827},
85
- 'B12': {'mean': 0.21169408, 'std': 0.05880649, 'min': 0.0975, 'max': 1.6775, 'p1': 0.115,
86
- 'p99': 0.3872}},
87
- 'CH': {'ch': {'mean': 9.736144, 'std': 9.493601, 'min': 0.0, 'max': 61.0, 'p1': 0.0, 'p99': 38.0},
88
- 'std': {'mean': 7.9882116, 'std': 4.549494, 'min': 0.0, 'max': 254.0, 'p1': 0.0, 'p99': 18.0}},
89
- 'DEM': {'mean': 604.63727, 'std': 588.02094, 'min': -82.0, 'max': 5205.0, 'p1': 507.0, 'p99': 450.0},
90
- 'Sentinel_metadata': {
91
- 'S2_vegetation_score': {'mean': 89.168724, 'std': 17.17321, 'min': 20.0, 'max': 100.0, 'p1': 29.0,
92
- 'p99': 100.0},
93
- 'S2_date': {'mean': 299.1638, 'std': 192.87402, 'min': -165.0, 'max': 623.0, 'p1': 253.0,
94
- 'p99': 277.0}}, 'GEDI': {
95
- 'agbd': {'mean': 66.97266, 'std': 98.66588, 'min': 0.0, 'max': 499.99985, 'p1': 0.9703503, 'p99': 163.46234},
96
- 'agbd_se': {'mean': 8.360701, 'std': 4.211524, 'min': 2.981795, 'max': 25.041483, 'p1': 2.9830396,
97
- 'p99': 8.612499},
98
- 'rh98': {'mean': 12.074685, 'std': 10.276359, 'min': -1.1200076, 'max': 111.990005, 'p1': 2.3599916,
99
- 'p99': 6.9500012},
100
- 'date': {'mean': 361.7431, 'std': 175.37294, 'min': 0.0, 'max': 624.0, 'p1': 360.0, 'p99': 146.0}}}
101
-
102
- def encode_lat_lon(lat, lon):
 
 
 
 
 
 
 
 
 
 
103
  """
104
- Encode the latitude and longitude into sin/cosine values. We use a simple WRAP positional encoding, as
105
- Mac Aodha et al. (2019).
 
 
106
 
107
  Args:
108
- - lat (float): the latitude
109
- - lon (float): the longitude
 
110
 
111
  Returns:
112
- - (lat_cos, lat_sin, lon_cos, lon_sin) (tuple): the sin/cosine values for the latitude and longitude
113
  """
114
 
115
- # The latitude goes from -90 to 90
116
- lat_cos, lat_sin = np.cos(np.pi * lat / 90), np.sin(np.pi * lat / 90)
117
- # The longitude goes from -180 to 180
118
- lon_cos, lon_sin = np.cos(np.pi * lon / 180), np.sin(np.pi * lon / 180)
 
 
 
 
 
 
 
 
 
119
 
120
- # Now we put everything in the [0,1] range
121
- lat_cos, lat_sin = (lat_cos + 1) / 2, (lat_sin + 1) / 2
122
- lon_cos, lon_sin = (lon_cos + 1) / 2, (lon_sin + 1) / 2
 
 
 
 
 
 
123
 
124
- return lat_cos, lat_sin, lon_cos, lon_sin
125
 
126
 
127
- def encode_coords(central_lat, central_lon, patch_size, resolution=10):
128
  """
129
- This function computes the latitude and longitude of a patch, from the latitude and longitude of its central pixel.
130
- It then encodes these values into sin/cosine values, and scales the results to [0,1].
131
 
132
  Args:
133
- - central_lat (float): the latitude of the central pixel
134
- - central_lon (float): the longitude of the central pixel
135
- - patch_size (tuple): the size of the patch
136
- - resolution (int): the resolution of the patch
 
137
 
138
  Returns:
139
- - (lat_cos, lat_sin, lon_cos, lon_sin) (tuple): the sin/cosine values for the latitude and longitude
140
  """
 
 
 
 
 
 
141
 
142
- # Initialize arrays to store latitude and longitude coordinates
143
-
144
- i_indices, j_indices = np.indices(patch_size)
145
-
146
- # Calculate the distance offset in meters for each pixel
147
- offset_lat = (i_indices - patch_size[0] // 2) * resolution
148
- offset_lon = (j_indices - patch_size[1] // 2) * resolution
149
 
150
- # Calculate the latitude and longitude for each pixel
151
- latitudes = central_lat + (offset_lat / 6371000) * (180 / np.pi)
152
- longitudes = central_lon + (offset_lon / 6371000) * (180 / np.pi) / np.cos(central_lat * np.pi / 180)
 
153
 
154
- lat_cos, lat_sin, lon_cos, lon_sin = encode_lat_lon(latitudes, longitudes)
 
 
 
155
 
156
- return lat_cos, lat_sin, lon_cos, lon_sin
 
 
 
157
 
 
 
 
158
 
159
- """
160
- Example usage:
161
- lat_cos, lat_sin, lon_cos, lon_sin = encode_coords(lat, lon, self.patch_size)
162
- lat_cos, lat_sin, lon_cos, lon_sin = lat_cos[..., np.newaxis], lat_sin[..., np.newaxis], lon_cos[..., np.newaxis], lon_sin[..., np.newaxis]
163
- """
 
 
 
 
 
164
 
 
 
 
165
 
166
- #########################################################################################################################
167
- # Denormalizer
168
 
169
 
170
- def denormalize_data(data, norm_values, norm_strat='pct'):
171
  """
172
- Normalize the data, according to various strategies:
173
- - mean_std: subtract the mean and divide by the standard deviation
174
- - pct: subtract the 1st percentile and divide by the 99th percentile
175
- - min_max: subtract the minimum and divide by the maximum
176
 
177
  Args:
178
- - data (np.array): the data to normalize
179
- - norm_values (dict): the normalization values
180
- - norm_strat (str): the normalization strategy
181
 
182
  Returns:
183
- - normalized_data (np.array): the normalized data
184
  """
185
 
186
- if norm_strat == 'mean_std':
187
- mean, std = norm_values['mean'], norm_values['std']
188
- data = (data - mean) / std
 
 
 
 
 
 
 
 
 
 
189
 
190
- elif norm_strat == 'pct':
191
- p1, p99 = norm_values['p1'], norm_values['p99']
192
- data = data * (p99 - p1) + p1
193
 
194
- elif norm_strat == 'min_max':
195
- min_val, max_val = norm_values['min'], norm_values['max']
196
- data = data * (max_val - min_val) + min_val
197
 
198
- else:
199
- raise ValueError(f'De-normalization strategy `{norm_strat}` is not valid.')
200
-
201
- return data
202
-
203
-
204
- def denormalize_bands(bands_data, norm_values, order, norm_strat='pct'):
205
  """
206
- This function normalizes the bands data using the normalization values and strategy.
207
 
208
  Args:
209
- - bands_data (np.array): the bands data to normalize
210
- - norm_values (dict): the normalization values
211
- - order (list): the order of the bands
212
- - norm_strat (str): the normalization strategy
213
 
214
  Returns:
215
- - bands_data (np.array): the normalized bands data
216
  """
217
 
218
- for i, band in enumerate(order):
219
- band_norm = norm_values[band]
220
- bands_data[:, :, i] = denormalize_data(bands_data[:, :, i], band_norm, norm_strat)
221
 
222
- return bands_data
 
223
 
224
- """
225
- def decode_lc(encoded_lc, mode='cos'):
226
- # Encode the LC classes with sin/cosine values and scale the data to [0,1]
227
- if mode == 'cos':
228
- lc = 100 * np.arccos(2 * encoded_lc - 1) / (2 * np.pi)
229
- elif mode == 'sin':
230
- lc = 100 * np.arcsin(2 * encoded_lc - 1) / (2 * np.pi)
231
- else:
232
- raise ValueError(f'Mode `{mode}` is not valid.')
233
- return lc
234
- """
235
-
236
- def recover_lc_map(lc_cos, lc_sin):
237
 
238
- # Convert lc_cos and lc_sin back to the range of the original sin and cos values
239
- lc_cos = 2 * lc_cos - 1
240
- lc_sin = 2 * lc_sin - 1
 
241
 
242
- # Calculate the angles using arccos and arcsin
243
- theta_cos = np.arccos(lc_cos)
244
- sin_theta_cos = np.sin(theta_cos)
245
- check = np.isclose(sin_theta_cos, lc_sin, atol = 1e-8)
246
- theta = np.where(check, theta_cos, 2 * np.pi - theta_cos)
247
 
248
- # Convert the angle theta back to lc_map
249
- lc_map = np.round((theta / (2 * np.pi)) * 100)
250
- lc_map = np.where(lc_map % 10 != 0, lc_map + 100, lc_map)
 
 
 
 
 
 
251
 
252
- return lc_map
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
253
 
 
 
254
 
255
  class NewDataset(datasets.GeneratorBasedBuilder):
256
- def __init__(self, *args, additional_features=[], normalize_data=True, patch_size=15, **kwargs):
 
 
 
257
  self.inner_dataset_kwargs = kwargs
258
  self._is_streaming = False
259
  self.patch_size = patch_size
260
- self.normalize_data = normalize_data
 
 
 
 
 
 
 
261
  self.additional_features = additional_features
 
 
 
 
 
 
 
262
  super().__init__(*args, **kwargs)
263
 
264
  VERSION = datasets.Version("1.1.0")
265
 
266
-
267
  BUILDER_CONFIGS = [
268
  datasets.BuilderConfig(name="default", version=VERSION, description="Normalized data"),
269
  datasets.BuilderConfig(name="unnormalized", version=VERSION, description="Unnormalized data"),
@@ -293,8 +392,6 @@ class NewDataset(datasets.GeneratorBasedBuilder):
293
  citation=_CITATION,
294
  )
295
 
296
-
297
-
298
  def _split_generators(self, dl_manager):
299
  self.original_dataset = datasets.load_dataset("prs-eth/AGBD_raw", streaming=self._is_streaming)
300
  return [
@@ -305,38 +402,70 @@ class NewDataset(datasets.GeneratorBasedBuilder):
305
 
306
  def _generate_examples(self, split):
307
  for i, d in enumerate(self.original_dataset[split]):
308
- if self.normalize_data :
309
- patch = np.asarray(d["input"])
310
 
311
- else:
312
- patch = np.asarray(d["input"])
313
- patch[:12] = denormalize_bands(patch[:12], norm_values['S2_bands'],['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09','B11', 'B12'])
314
- patch[12:14] = denormalize_bands(patch[12:14], norm_values['ALOS_bands'], ['HH', 'HV'])
315
- patch[14] = denormalize_data(patch[14], norm_values['CH']['ch'])
316
- patch[15] = denormalize_data(patch[15], norm_values['CH']['std'])
317
- lc_cos, lc_sin = patch[16], patch[17]
318
- lc = recover_lc_map(lc_cos, lc_sin)
319
- patch[16] = lc
320
- patch[17] = lc
321
- patch[18] = patch[18] * 100
322
- patch[19] = denormalize_data(patch[19], norm_values['DEM'])
 
 
 
 
 
 
 
 
 
 
323
 
 
 
 
 
 
 
 
 
 
324
 
325
- lat, lon = d["metadata"]["lat"],d["metadata"]["lon"]
 
326
 
327
- latlon_patch = encode_coords(lat, lon,(self.patch_size,self.patch_size))
 
 
328
 
 
 
329
 
 
 
 
 
 
 
 
 
330
  start_x = (patch.shape[1] - self.patch_size) // 2
331
  start_y = (patch.shape[2] - self.patch_size) // 2
332
- patch = patch[:, start_x:start_x + self.patch_size, start_y:start_y + self.patch_size]
333
 
334
- patch = np.concatenate([patch[:12],latlon_patch,patch[12:]],0)
335
 
336
- data = {'input': patch, 'label': d["label"]}
 
 
 
337
  for feat in self.additional_features:
338
  data[feat] = d["metadata"][feat]
339
 
340
  yield i, data
341
 
342
-
 
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
+
15
+ ############################################################################################################################
16
+ # IMPORTS
17
 
18
  import numpy as np
19
  import datasets
20
  from datasets import Value
21
  import pickle
22
+ import pandas as pd
23
+
24
+ ############################################################################################################################
25
+ # GLOBAL VARIABLES
26
+
27
+ # BibTeX citation
28
  _CITATION = """\
29
+ @misc{https://doi.org/10.48550/arxiv.2406.04928,
30
+ doi = {10.48550/ARXIV.2406.04928},
31
+ url = {https://arxiv.org/abs/2406.04928},
32
+ author = {Sialelli, Ghjulia and Peters, Torben and Wegner, Jan D. and Schindler, Konrad},
33
+ keywords = {Computer Vision and Pattern Recognition (cs.CV), Machine Learning (cs.LG), Image and Video Processing (eess.IV), FOS: Computer and information sciences, FOS: Computer and information sciences, FOS: Electrical engineering, electronic engineering, information engineering, FOS: Electrical engineering, electronic engineering, information engineering},
34
+ title = {AGBD: A Global-scale Biomass Dataset},
35
+ publisher = {arXiv},
36
+ year = {2024},
37
+ copyright = {Creative Commons Attribution Non Commercial Share Alike 4.0 International}
38
  }
39
  """
40
 
41
+ # Description of the dataset
 
42
  _DESCRIPTION = """\
43
+ This new dataset is a machine-learning ready dataset of high-resolution (10m), multi-modal satellite imagery, paired with AGB reference values from NASA’s Global Ecosystem Dynamics Investigation (GEDI) mission.
44
  """
45
 
46
  # TODO: Add a link to an official homepage for the dataset here
47
  _HOMEPAGE = ""
48
 
49
+ # License of the dataset
50
+ _LICENSE = "https://creativecommons.org/licenses/by-nc/4.0/"
 
 
51
 
52
+ # Metadata features
53
  feature_dtype = {'s2_num_days': Value('int16'),
54
  'gedi_num_days': Value('uint16'),
55
  'lat': Value('float32'),
 
64
  "solar_elev": Value('float32'),
65
  "urban_prop":Value('uint8')}
66
 
67
+ # Default input features configuration
68
+ default_input_features = {'S2_bands': ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09','B11', 'B12'],
69
+ 'S2_dates' : False, 'lat_lon': True, 'GEDI_dates': False, 'ALOS': True, 'CH': True, 'LC': True,
70
+ 'DEM': True, 'topo': False}
71
+
72
+ # Mapping from Sentinel-2 band to index in the data
73
+ s2_bands_idx = {'B01': 0, 'B02': 1, 'B03': 2, 'B04': 3, 'B05': 4, 'B06': 5, 'B07': 6, 'B08': 7, 'B8A': 8, 'B09': 9, 'B11': 10, 'B12': 11}
74
+
75
+ # Normalization values
76
+ norm_values = {
77
+ 'ALOS_bands': {
78
+ 'HH': {'mean': -10.381429, 'std': 8.561741, 'min': -83.0, 'max': 13.329468, 'p1': -83.0, 'p99': -2.1084213},
79
+ 'HV': {'mean': -16.722847, 'std': 8.718428, 'min': -83.0, 'max': 11.688309, 'p1': -83.0, 'p99': -7.563843}},
80
+ 'S2_bands':
81
+ {'B01': {'mean': 0.12478869, 'std': 0.024433358, 'min': 1e-04, 'max': 1.8808, 'p1': 0.0787, 'p99': 0.1944},
82
+ 'B02': {'mean': 0.13480005, 'std': 0.02822557, 'min': 1e-04, 'max': 2.1776, 'p1': 0.0925, 'p99': 0.2214},
83
+ 'B03': {'mean': 0.16031432, 'std': 0.032037303, 'min': 1e-04, 'max': 2.12, 'p1': 0.1035, 'p99': 0.2556},
84
+ 'B04': {'mean': 0.1532097, 'std': 0.038628064, 'min': 1e-04, 'max': 2.0032, 'p1': 0.1023, 'p99': 0.2816},
85
+ 'B05': {'mean': 0.20312776, 'std': 0.04205057, 'min': 0.0422, 'max': 1.7502, 'p1': 0.1178, 'p99': 0.3189},
86
+ 'B06': {'mean': 0.32636437, 'std': 0.07139242, 'min': 0.0502, 'max': 1.7245, 'p1': 0.1632, 'p99': 0.519},
87
+ 'B07': {'mean': 0.36605212, 'std': 0.08555025, 'min': 0.0616, 'max': 1.7149, 'p1': 0.1775, 'p99': 0.6075},
88
+ 'B08': {'mean': 0.3811653, 'std': 0.092815965, 'min': 1e-04, 'max': 1.7488, 'p1': 0.1691, 'p99': 0.646},
89
+ 'B8A': {'mean': 0.3910436, 'std': 0.0896364, 'min': 0.055, 'max': 1.688, 'p1': 0.187, 'p99': 0.6385},
90
+ 'B09': {'mean': 0.3910644, 'std': 0.0836445, 'min': 0.0012, 'max': 1.7915, 'p1': 0.2123, 'p99': 0.6238},
91
+ 'B11': {'mean': 0.2917373, 'std': 0.07472579, 'min': 0.0953, 'max': 1.648, 'p1': 0.1334, 'p99': 0.4827},
92
+ 'B12': {'mean': 0.21169408, 'std': 0.05880649, 'min': 0.0975, 'max': 1.6775, 'p1': 0.1149, 'p99': 0.3869}},
93
+ 'CH': {
94
+ 'ch': {'mean': 9.736144, 'std': 9.493601, 'min': 0.0, 'max': 61.0, 'p1': 0.0, 'p99': 38.0},
95
+ 'std': {'mean': 7.9882116, 'std': 4.549494, 'min': 0.0, 'max': 254.0, 'p1': 0.0, 'p99': 18.0}},
96
+ 'DEM': {
97
+ 'mean': 604.63727, 'std': 588.02094, 'min': -82.0, 'max': 5205.0, 'p1': 4.0, 'p99': 2297.0},
98
+ 'Sentinel_metadata': {
99
+ 'S2_vegetation_score': {'mean': 89.168724, 'std': 17.17321, 'min': 20.0, 'max': 100.0, 'p1': 29.0, 'p99': 100.0},
100
+ 'S2_date': {'mean': 299.1638, 'std': 192.87402, 'min': -165.0, 'max': 623.0, 'p1': -105.0, 'p99': 602.0}},
101
+ 'GEDI': {
102
+ 'agbd': {'mean': 66.97266, 'std': 98.66588, 'min': 0.0, 'max': 499.99985, 'p1': 0.0, 'p99': 429.7605},
103
+ 'agbd_se': {'mean': 8.360701, 'std': 4.211524, 'min': 2.981795, 'max': 25.041483, 'p1': 2.9819136, 'p99': 17.13577},
104
+ 'rh98': {'mean': 12.074685, 'std': 10.276359, 'min': -1.1200076, 'max': 111.990005, 'p1': 2.3599916, 'p99': 41.96},
105
+ 'date': {'mean': 361.7431, 'std': 175.37294, 'min': 0.0, 'max': 624.0, 'p1': 5.0, 'p99': 619.0}}
106
+ }
107
+
108
+ # Define the nodata values for each data source
109
+ NODATAVALS = {'S2_bands' : 0, 'CH': 255, 'ALOS_bands': -9999.0, 'DEM': -9999, 'LC': 255}
110
+
111
+ # Reference biomes, and derived metrics
112
+ REF_BIOMES = {20: 'Shrubs', 30: 'Herbaceous vegetation', 40: 'Cultivated', 90: 'Herbaceous wetland', 111: 'Closed-ENL', 112: 'Closed-EBL', 114: 'Closed-DBL', 115: 'Closed-mixed', 116: 'Closed-other', 121: 'Open-ENL', 122: 'Open-EBL', 124: 'Open-DBL', 125: 'Open-mixed', 126: 'Open-other'}
113
+ _biome_values_mapping = {v: i for i, v in enumerate(REF_BIOMES.keys())}
114
+ _ref_biome_values = [v for v in REF_BIOMES.keys()]
115
+
116
+ ############################################################################################################################
117
+ # Helper functions
118
+
119
+ def normalize_data(data, norm_values, norm_strat, nodata_value = None) :
120
  """
121
+ Normalize the data, according to various strategies:
122
+ - mean_std: subtract the mean and divide by the standard deviation
123
+ - pct: subtract the 1st percentile and divide by the 99th percentile
124
+ - min_max: subtract the minimum and divide by the maximum
125
 
126
  Args:
127
+ - data (np.array): the data to normalize
128
+ - norm_values (dict): the normalization values
129
+ - norm_strat (str): the normalization strategy
130
 
131
  Returns:
132
+ - normalized_data (np.array): the normalized data
133
  """
134
 
135
+ if norm_strat == 'mean_std' :
136
+ mean, std = norm_values['mean'], norm_values['std']
137
+ if nodata_value is not None :
138
+ data = np.where(data == nodata_value, 0, (data - mean) / std)
139
+ else : data = (data - mean) / std
140
+
141
+ elif norm_strat == 'pct' :
142
+ p1, p99 = norm_values['p1'], norm_values['p99']
143
+ if nodata_value is not None :
144
+ data = np.where(data == nodata_value, 0, (data - p1) / (p99 - p1))
145
+ else :
146
+ data = (data - p1) / (p99 - p1)
147
+ data = np.clip(data, 0, 1)
148
 
149
+ elif norm_strat == 'min_max' :
150
+ min_val, max_val = norm_values['min'], norm_values['max']
151
+ if nodata_value is not None :
152
+ data = np.where(data == nodata_value, 0, (data - min_val) / (max_val - min_val))
153
+ else:
154
+ data = (data - min_val) / (max_val - min_val)
155
+
156
+ else:
157
+ raise ValueError(f'Normalization strategy `{norm_strat}` is not valid.')
158
 
159
+ return data
160
 
161
 
162
+ def normalize_bands(bands_data, norm_values, order, norm_strat, nodata_value = None) :
163
  """
164
+ This function normalizes the bands data using the normalization values and strategy.
 
165
 
166
  Args:
167
+ - bands_data (np.array): the bands data to normalize
168
+ - norm_values (dict): the normalization values
169
+ - order (list): the order of the bands
170
+ - norm_strat (str): the normalization strategy
171
+ - nodata_value (int/float): the nodata value
172
 
173
  Returns:
174
+ - bands_data (np.array): the normalized bands data
175
  """
176
+
177
+ for i, band in enumerate(order) :
178
+ band_norm = norm_values[band]
179
+ bands_data[:, :, i] = normalize_data(bands_data[:, :, i], band_norm, norm_strat, nodata_value)
180
+
181
+ return bands_data
182
 
 
 
 
 
 
 
 
183
 
184
+ def one_hot(x) :
185
+ one_hot = np.zeros(len(_biome_values_mapping))
186
+ one_hot[_biome_values_mapping.get(x, 0)] = 1
187
+ return one_hot
188
 
189
+ def encode_biome(lc, encode_strat, embeddings = None) :
190
+ """
191
+ This function encodes the land cover data using different strategies: 1) sin/cosine encoding,
192
+ 2) cat2vec embeddings, 3) one-hot encoding.
193
 
194
+ Args:
195
+ - lc (np.array): the land cover data
196
+ - encode_strat (str): the encoding strategy
197
+ - embeddings (dict): the cat2vec embeddings
198
 
199
+ Returns:
200
+ - encoded_lc (np.array): the encoded land cover data
201
+ """
202
 
203
+ if encode_strat == 'sin_cos' :
204
+ # Encode the LC classes with sin/cosine values and scale the data to [0,1]
205
+ lc_cos = np.where(lc == NODATAVALS['LC'], 0, (np.cos(2 * np.pi * lc / 201) + 1) / 2)
206
+ lc_sin = np.where(lc == NODATAVALS['LC'], 0, (np.sin(2 * np.pi * lc / 201) + 1) / 2)
207
+ return np.stack([lc_cos, lc_sin], axis = -1).astype(np.float32)
208
+
209
+ elif encode_strat == 'cat2vec' :
210
+ # Embed the LC classes using the cat2vec embeddings
211
+ lc_cat2vec = np.vectorize(lambda x: embeddings.get(x, embeddings.get(0)), signature = '()->(n)')(lc)
212
+ return lc_cat2vec.astype(np.float32)
213
 
214
+ elif encode_strat == 'onehot' :
215
+ lc_onehot = np.vectorize(one_hot, signature = '() -> (n)')(lc).astype(np.float32)
216
+ return lc_onehot
217
 
218
+ else: raise ValueError(f'Encoding strategy `{encode_strat}` is not valid.')
 
219
 
220
 
221
+ def compute_num_features(input_features, encode_strat) :
222
  """
223
+ This function computes the number of features that will be used in the model.
 
 
 
224
 
225
  Args:
226
+ - input_features (dict): the input features configuration
227
+ - encode_strat (str): the encoding strategy
 
228
 
229
  Returns:
230
+ - num_features (int): the number of features
231
  """
232
 
233
+ num_features = len(input_features['S2_bands'])
234
+ if input_features['S2_dates'] : num_features += 3
235
+ if input_features['lat_lon'] : num_features += 4
236
+ if input_features['GEDI_dates'] : num_features += 3
237
+ if input_features['ALOS'] : num_features += 2
238
+ if input_features['CH'] : num_features += 2
239
+ if input_features['LC'] :
240
+ num_features += 1
241
+ if encode_strat == 'sin_cos' : num_features += 2
242
+ elif encode_strat == 'cat2vec' : num_features += 5
243
+ elif encode_strat == 'onehot' : num_features += len(REF_BIOMES)
244
+ if input_features['DEM'] : num_features += 1
245
+ if input_features['topo'] : num_features += 3
246
 
247
+ return num_features
 
 
248
 
 
 
 
249
 
250
+ def concatenate_features(patch, lc_patch, input_features, encode_strat) :
 
 
 
 
 
 
251
  """
252
+ This function concatenates the features that the user requested.
253
 
254
  Args:
255
+ - patch (np.array): the patch data
256
+ - lc_patch (np.array): the land cover data
257
+ - input_features (dict): the input features configuration
258
+ - encode_strat (str): the encoding strategy
259
 
260
  Returns:
261
+ - out_patch (np.array): the concatenated features
262
  """
263
 
264
+ # Compute the number of features
265
+ num_features = compute_num_features(input_features, encode_strat)
266
+ out_patch = np.zeros((num_features, patch.shape[1], patch.shape[2]), dtype = np.float32)
267
 
268
+ # Concatenate the features
269
+ current_idx = 0
270
 
271
+ # Sentinel-2 bands
272
+ s2_indices = [s2_bands_idx[band] for band in input_features['S2_bands']]
273
+ out_patch[: current_idx + len(s2_indices)] = patch[s2_indices]
274
+ current_idx += len(s2_indices)
 
 
 
 
 
 
 
 
 
275
 
276
+ # S2 dates
277
+ if input_features['S2_dates'] :
278
+ out_patch[current_idx : current_idx + 3] = patch[12:15]
279
+ current_idx += 3
280
 
281
+ # Lat/Lon
282
+ if input_features['lat_lon'] :
283
+ out_patch[current_idx : current_idx + 4] = patch[15:19]
284
+ current_idx += 4
 
285
 
286
+ # GEDI dates
287
+ if input_features['GEDI_dates'] :
288
+ out_patch[current_idx : current_idx + 3] = patch[19:22]
289
+ current_idx += 3
290
+
291
+ # ALOS bands
292
+ if input_features['ALOS'] :
293
+ out_patch[current_idx : current_idx + 2] = patch[22:24]
294
+ current_idx += 2
295
 
296
+ # CH bands
297
+ if input_features['CH'] :
298
+ out_patch[current_idx] = patch[24]
299
+ out_patch[current_idx + 1] = patch[25]
300
+ current_idx += 2
301
+
302
+ # LC data
303
+ if input_features['LC'] :
304
+
305
+ # LC encoding
306
+ if encode_strat == 'sin_cos' :
307
+ out_patch[current_idx : current_idx + 2] = lc_patch
308
+ current_idx += 2
309
+ elif encode_strat == 'cat2vec' :
310
+ out_patch[current_idx : current_idx + 5] = lc_patch
311
+ current_idx += 5
312
+ elif encode_strat == 'onehot' :
313
+ out_patch[current_idx : current_idx + len(REF_BIOMES)] = lc_patch
314
+ current_idx += len(REF_BIOMES)
315
+ elif encode_strat == 'none' :
316
+ out_patch[current_idx] = lc_patch
317
+ current_idx += 1
318
+
319
+ # LC probability
320
+ out_patch[current_idx] = patch[27]
321
+ current_idx += 1
322
+
323
+ # Topographic data
324
+ if input_features['topo'] :
325
+ out_patch[current_idx : current_idx + 3] = patch[28:31]
326
+ current_idx += 3
327
+
328
+ # DEM
329
+ if input_features['DEM'] :
330
+ out_patch[current_idx] = patch[31]
331
+ current_idx += 1
332
+
333
+ return out_patch
334
 
335
+ #########################################################################################################################
336
+ # DATASET CLASS DEFINITION
337
 
338
  class NewDataset(datasets.GeneratorBasedBuilder):
339
+ """DatasetBuilder for AGBD dataset."""
340
+ def __init__(self, *args, input_features = default_input_features, additional_features = [], norm_strat = 'pct',
341
+ encode_strat = 'sin_cos', patch_size = 15, **kwargs):
342
+
343
  self.inner_dataset_kwargs = kwargs
344
  self._is_streaming = False
345
  self.patch_size = patch_size
346
+
347
+ assert norm_strat in ['mean_std', 'pct', 'none'], f'Normalization strategy `{norm_strat}` is not valid.'
348
+ self.norm_strat = norm_strat
349
+
350
+ assert encode_strat in ['sin_cos', 'cat2vec', 'onehot', 'none'], f'Encoding strategy `{encode_strat}` is not valid.'
351
+ self.encode_strat = encode_strat
352
+
353
+ self.input_features = input_features
354
  self.additional_features = additional_features
355
+
356
+ if self.encode_strat == 'cat2vec' :
357
+ embeddings = pd.read_csv("embeddings_train.csv")
358
+ embeddings = dict([(v,np.array([a,b,c,d,e])) for v, a,b,c,d,e in zip(embeddings.mapping, embeddings.dim0, embeddings.dim1, embeddings.dim2, embeddings.dim3, embeddings.dim4)])
359
+ self.embeddings = embeddings
360
+ else: self.embeddings = None
361
+
362
  super().__init__(*args, **kwargs)
363
 
364
  VERSION = datasets.Version("1.1.0")
365
 
 
366
  BUILDER_CONFIGS = [
367
  datasets.BuilderConfig(name="default", version=VERSION, description="Normalized data"),
368
  datasets.BuilderConfig(name="unnormalized", version=VERSION, description="Unnormalized data"),
 
392
  citation=_CITATION,
393
  )
394
 
 
 
395
  def _split_generators(self, dl_manager):
396
  self.original_dataset = datasets.load_dataset("prs-eth/AGBD_raw", streaming=self._is_streaming)
397
  return [
 
402
 
403
  def _generate_examples(self, split):
404
  for i, d in enumerate(self.original_dataset[split]):
 
 
405
 
406
+ patch = np.asarray(d["input"])
407
+
408
+ # ------------------------------------------------------------------------------------------------
409
+ # Process the data that needs to be processed
410
+
411
+ # Structure of the d["input"] data:
412
+ # - 12 x Sentinel-2 bands
413
+ # - 3 x S2 dates bands (s2_num_days, s2_doy_cos, s2_doy_sin)
414
+ # - 4 x lat/lon (lat_cos, lat_sin, lon_cos, lon_sin)
415
+ # - 3 x GEDI dates bands (gedi_num_days, gedi_doy_cos, gedi_doy_sin)
416
+ # - 2 x ALOS bands (HH, HV)
417
+ # - 2 x CH bands (ch, std)
418
+ # - 2 x LC bands (lc encoding, lc_prob)
419
+ # - 4 x DEM bands (slope, aspect_cos, aspect_sin, dem)
420
+
421
+ if self.norm_strat != 'none' :
422
+
423
+ # Normalize S2 bands
424
+ patch[:12] = normalize_bands(patch[:12], norm_values['S2_bands'], ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09','B11', 'B12'], self.norm_strat, NODATAVALS['S2_bands'])
425
+
426
+ # Normalize s2_num_days
427
+ patch[12] = normalize_data(patch[12], norm_values['Sentinel_metadata']['S2_date'], 'min_max' if self.norm_strat == 'pct' else self.norm_strat)
428
 
429
+ # Normalize gedi_num_days
430
+ patch[19] = normalize_data(patch[19], norm_values['GEDI']['date'], 'min_max' if self.norm_strat == 'pct' else self.norm_strat)
431
+
432
+ # Normalize ALOS bands
433
+ patch[22:24] = normalize_bands(patch[22:24], norm_values['ALOS_bands'], ['HH', 'HV'], self.norm_strat, NODATAVALS['ALOS_bands'])
434
+
435
+ # Normalize CH bands
436
+ patch[24] = normalize_data(patch[24], norm_values['CH']['ch'], self.norm_strat, NODATAVALS['CH'])
437
+ patch[25] = normalize_data(patch[25], norm_values['CH']['std'], self.norm_strat, NODATAVALS['CH'])
438
 
439
+ # Normalize DEM bands
440
+ patch[31] = normalize_data(patch[31], norm_values['DEM'], self.norm_strat, NODATAVALS['DEM'])
441
 
442
+ # Encode LC data
443
+ if self.encode_strat != 'none' : lc_patch = encode_biome(patch[26], self.encode_strat, self.embeddings).swapaxes(-1,0)
444
+ else: lc_patch = patch[26]
445
 
446
+ # Put lc_prob in [0,1] range
447
+ patch[27] = patch[27] / 100
448
 
449
+ # ------------------------------------------------------------------------------------------------
450
+ # Concatenate the features that the user requested
451
+
452
+ out_patch = concatenate_features(patch, lc_patch, self.input_features, self.encode_strat)
453
+
454
+ # ------------------------------------------------------------------------------------------------
455
+
456
+ # Crop to the patch size
457
  start_x = (patch.shape[1] - self.patch_size) // 2
458
  start_y = (patch.shape[2] - self.patch_size) // 2
459
+ out_patch = out_patch[:, start_x : start_x + self.patch_size, start_y : start_y + self.patch_size]
460
 
461
+ # ------------------------------------------------------------------------------------------------
462
 
463
+ # Create the data dictionary
464
+ data = {'input': out_patch, 'label': d["label"]}
465
+
466
+ # Add the additional features
467
  for feat in self.additional_features:
468
  data[feat] = d["metadata"][feat]
469
 
470
  yield i, data
471