yuxuanw8 committed on
Commit
400a63f
·
verified ·
1 Parent(s): 9fbffca

Upload BigEarthNet.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. BigEarthNet.py +276 -0
BigEarthNet.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import shutil
4
+ import string
5
+ import tifffile
6
+ import datasets
7
+
8
+ import numpy as np
9
+ import pandas as pd
10
+
11
# CORINE Land Cover (CLC) class-name sets, keyed by the size of the label set:
#   43 -> the original BigEarthNet class nomenclature (order defines the
#         on-disk label indices used by label_converter below),
#   19 -> the condensed BigEarthNet-19 nomenclature.
# Note: two entries are deliberately split across adjacent string literals
# (implicit concatenation) because the class name is very long.
class_sets = {
    19: [
        'Urban fabric',
        'Industrial or commercial units',
        'Arable land',
        'Permanent crops',
        'Pastures',
        'Complex cultivation patterns',
        'Land principally occupied by agriculture, with significant areas of'
        ' natural vegetation',
        'Agro-forestry areas',
        'Broad-leaved forest',
        'Coniferous forest',
        'Mixed forest',
        'Natural grassland and sparsely vegetated areas',
        'Moors, heathland and sclerophyllous vegetation',
        'Transitional woodland, shrub',
        'Beaches, dunes, sands',
        'Inland wetlands',
        'Coastal wetlands',
        'Inland waters',
        'Marine waters',
    ],
    43: [
        'Continuous urban fabric',
        'Discontinuous urban fabric',
        'Industrial or commercial units',
        'Road and rail networks and associated land',
        'Port areas',
        'Airports',
        'Mineral extraction sites',
        'Dump sites',
        'Construction sites',
        'Green urban areas',
        'Sport and leisure facilities',
        'Non-irrigated arable land',
        'Permanently irrigated land',
        'Rice fields',
        'Vineyards',
        'Fruit trees and berry plantations',
        'Olive groves',
        'Pastures',
        'Annual crops associated with permanent crops',
        'Complex cultivation patterns',
        'Land principally occupied by agriculture, with significant areas of'
        ' natural vegetation',
        'Agro-forestry areas',
        'Broad-leaved forest',
        'Coniferous forest',
        'Mixed forest',
        'Natural grassland',
        'Moors and heathland',
        'Sclerophyllous vegetation',
        'Transitional woodland/shrub',
        'Beaches, dunes, sands',
        'Bare rock',
        'Sparsely vegetated areas',
        'Burnt areas',
        'Inland marshes',
        'Peatbogs',
        'Salt marshes',
        'Salines',
        'Intertidal flats',
        'Water courses',
        'Water bodies',
        'Coastal lagoons',
        'Estuaries',
        'Sea and ocean',
    ],
}
81
+
82
# Mapping from 43-class label indices (keys, positions in class_sets[43])
# to 19-class label indices (values, positions in class_sets[19]).
# Several 43-class indices map to the same 19-class index (classes merged in
# the condensed nomenclature). Indices absent from this dict
# (3-10, 30, 32, 37) have no 19-class counterpart and are dropped when a
# label is converted (lookups are done with .get(), yielding None).
label_converter = {
    0: 0,
    1: 0,
    2: 1,
    11: 2,
    12: 2,
    13: 2,
    14: 3,
    15: 3,
    16: 3,
    18: 3,
    17: 4,
    19: 5,
    20: 6,
    21: 7,
    22: 8,
    23: 9,
    24: 10,
    25: 11,
    31: 11,
    26: 12,
    27: 12,
    28: 13,
    29: 14,
    33: 15,
    34: 15,
    35: 16,
    36: 16,
    38: 17,
    39: 17,
    40: 18,
    41: 18,
    42: 18,
}
116
+
117
# Per-band dataset statistics, exposed to consumers via the builder's
# metadata dict ("mean"/"std" entries) — presumably used for input
# normalization downstream; TODO confirm against the training pipeline.
# Sentinel-2: one value per band, ordered as the "s2c" band list
# (B1, B2, B3, B4, B5, B6, B7, B8, B8A, B9, B11, B12).
S2_MEAN = [752.40087073, 884.29673756, 1144.16202635, 1297.47289228, 1624.90992062, 2194.6423161, 2422.21248945, 2517.76053101, 2581.64687018, 2645.51888987, 2368.51236873, 1805.06846033]
S2_STD = [1108.02887453, 1155.15170768, 1183.6292542, 1368.11351514, 1370.265037, 1355.55390699, 1416.51487101, 1474.78900051, 1439.3086061, 1582.28010962, 1455.52084939, 1343.48379601]

# Sentinel-1: one value per polarization, ordered as the "s1" band list (VV, VH).
S1_MEAN = [-12.54847273, -20.19237134]
S1_STD = [5.25697717, 5.91150917]
122
+
123
# Suffixes of the 34 archive shards: "aa".."az" followed by "ba".."bh".
parts = [
    f"{prefix}{letter}"
    for prefix, letters in (("a", string.ascii_lowercase), ("b", string.ascii_lowercase[:8]))
    for letter in letters
]
125
+
126
class BigEarthNetDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for the GFM-Bench BigEarthNet release on the HF Hub.

    Yields paired 12-band Sentinel-2 optical and 2-band Sentinel-1 radar
    patches (120x120, channel-first float32) with a 19-class multi-hot
    land-cover label vector, split into train/val/test via metadata.csv.
    """

    VERSION = datasets.Version("1.0.0")

    # The archive is published as 34 shards (bigearthnet_part_aa .. _bh)
    # that must be concatenated back into one tarball before extraction.
    DATA_URL = [
        f"https://huggingface.co/datasets/GFM-Bench/BigEarthNet/resolve/main/data/bigearthnet_part_{part}"
        for part in parts
    ]

    # Per-modality band names, channel wavelengths, and dataset statistics.
    metadata = {
        "s2c": {
            "bands": ["B1", "B2", "B3", "B4", "B5", "B6", "B7", "B8", "B8A", "B9", "B11", "B12"],
            "channel_wv": [442.7, 492.4, 559.8, 664.6, 704.1, 740.5, 782.8, 832.8, 864.7, 945.1, 1613.7, 2202.4],
            "mean": S2_MEAN,
            "std": S2_STD
        },
        "s1": {
            "bands": ["VV", "VH"],
            "channel_wv": [5500, 5700],
            "mean": S1_MEAN,
            "std": S1_STD
        }
    }

    SIZE = HEIGHT = WIDTH = 120   # patch side length in pixels
    NUM_CLASSES = 19              # BigEarthNet-19 label set
    spatial_resolution = 10       # meters per pixel

    def __init__(self, *args, **kwargs):
        # Index of every 43-class CLC name; on-disk labels are class-name
        # strings and are mapped name -> 43-idx -> 19-idx in _load_label.
        self.class2idx = {c: i for i, c in enumerate(class_sets[43])}
        super().__init__(*args, **kwargs)

    def _info(self):
        """Build the DatasetInfo; builder metadata is JSON-encoded into description."""
        # Copy first: the original mutated the shared class-level dict on
        # every call, accumulating 'size'/'num_classes'/... keys globally.
        metadata = dict(self.metadata)
        metadata['size'] = self.SIZE
        metadata['num_classes'] = self.NUM_CLASSES
        metadata['spatial_resolution'] = self.spatial_resolution
        return datasets.DatasetInfo(
            description=json.dumps(metadata),
            features=datasets.Features({
                "optical": datasets.Array3D(shape=(12, self.HEIGHT, self.WIDTH), dtype="float32"),
                "radar": datasets.Array3D(shape=(2, self.HEIGHT, self.WIDTH), dtype="float32"),
                "optical_channel_wv": datasets.Sequence(datasets.Value("float32")),
                "radar_channel_wv": datasets.Sequence(datasets.Value("float32")),
                "label": datasets.Sequence(datasets.Value("float32"), length=self.NUM_CLASSES),
                "spatial_resolution": datasets.Value("int32"),
            }),
        )

    def _split_generators(self, dl_manager):
        """Download the shards, reassemble and extract the archive, declare splits."""
        if isinstance(self.DATA_URL, list):
            try:
                print("Downloading data files from HF")
                downloaded_files = dl_manager.download(self.DATA_URL)
                print("Downloading Finished")
                # Concatenate all shards into a single tarball, then extract.
                combined_file = os.path.join(dl_manager.download_config.cache_dir, "combined.tar.gz")
                with open(combined_file, 'wb') as outfile:
                    # Bug fix: the progress counter was initialized to 0 but
                    # never incremented; enumerate() numbers shards correctly.
                    for counter, part_file in enumerate(downloaded_files):
                        print(f"copying {counter}-th file")
                        with open(part_file, 'rb') as infile:
                            shutil.copyfileobj(infile, outfile)
                data_dir = dl_manager.extract(combined_file)
                os.remove(combined_file)  # free the duplicated archive space
            except Exception as e:
                # Deliberate best-effort: report and continue without data.
                # NOTE(review): data_dir=None makes _generate_examples fail
                # later with an opaque error; consider re-raising here.
                print(f"An error occurred: {e}, setting data_dir to None")
                data_dir = None
        else:
            data_dir = dl_manager.download_and_extract(self.DATA_URL)

        return [
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={
                    "split": 'train',
                    "data_dir": data_dir,
                },
            ),
            datasets.SplitGenerator(
                name="val",
                gen_kwargs={
                    "split": 'val',
                    "data_dir": data_dir,
                },
            ),
            datasets.SplitGenerator(
                name="test",
                gen_kwargs={
                    "split": 'test',
                    "data_dir": data_dir,
                },
            )
        ]

    def _generate_examples(self, split, data_dir):
        """Yield (key, example) pairs for one split, driven by metadata.csv.

        metadata.csv must provide 'split', 'optical_path', 'radar_path' and
        'label_path' columns with paths relative to the BigEarthNet root.
        """
        optical_channel_wv = np.array(self.metadata["s2c"]["channel_wv"])
        radar_channel_wv = np.array(self.metadata["s1"]["channel_wv"])
        spatial_resolution = self.spatial_resolution

        data_dir = os.path.join(data_dir, "BigEarthNet")
        metadata = pd.read_csv(os.path.join(data_dir, "metadata.csv"))
        metadata = metadata[metadata["split"] == split].reset_index(drop=True)

        for index, row in metadata.iterrows():
            optical_path = os.path.join(data_dir, row.optical_path)
            optical = self._read_image(optical_path).astype(np.float32)  # CxHxW

            radar_path = os.path.join(data_dir, row.radar_path)
            radar = self._read_image(radar_path).astype(np.float32)

            label_path = os.path.join(data_dir, row.label_path)
            label = self._load_label(label_path)

            sample = {
                "optical": optical,
                "radar": radar,
                "optical_channel_wv": optical_channel_wv,
                "radar_channel_wv": radar_channel_wv,
                "label": label,
                "spatial_resolution": spatial_resolution,
            }

            yield f"{index}", sample

    def _load_label(self, label_path):
        """Load a label JSON and return a NUM_CLASSES-dim multi-hot int64 vector.

        Class-name strings are mapped to 43-class indices via self.class2idx,
        then converted to 19-class indices; names whose 43-class index has no
        entry in label_converter are dropped.
        """
        with open(label_path) as f:
            labels = json.load(f)['labels']
        indices = [self.class2idx[label] for label in labels]
        # .get() returns None for classes removed in the 19-class nomenclature.
        indices_optional = [label_converter.get(idx) for idx in indices]
        indices = [idx for idx in indices_optional if idx is not None]
        # Consistency: was a hard-coded 19; NUM_CLASSES is the single source of truth.
        label = np.zeros(self.NUM_CLASSES, dtype=np.int64)
        label[indices] = 1
        return label

    def _read_image(self, image_path):
        """Read a TIFF image and return it channel-first.

        Args:
            image_path: path of the TIFF file to read.

        Returns:
            (C, H, W) numpy array (tifffile yields HxWxC; axes are transposed).
        """
        image = tifffile.imread(image_path)
        image = np.transpose(image, (2, 0, 1))

        return image