yuxuanw8 commited on
Commit
8dbbd8d
·
verified ·
1 Parent(s): 0ee1611

Delete BigEarthNet.py

Browse files
Files changed (1) hide show
  1. BigEarthNet.py +0 -270
BigEarthNet.py DELETED
@@ -1,270 +0,0 @@
1
- import os
2
- import json
3
- import shutil
4
- import string
5
- import tifffile
6
- import datasets
7
-
8
- import numpy as np
9
- import pandas as pd
10
-
11
# Land-cover nomenclatures for BigEarthNet, keyed by number of classes:
#   19 -> the collapsed label set introduced for BigEarthNet-19
#   43 -> the original CORINE Land Cover (CLC) class names used on disk
class_sets = {
    19: [
        'Urban fabric',
        'Industrial or commercial units',
        'Arable land',
        'Permanent crops',
        'Pastures',
        'Complex cultivation patterns',
        'Land principally occupied by agriculture, with significant areas of natural vegetation',
        'Agro-forestry areas',
        'Broad-leaved forest',
        'Coniferous forest',
        'Mixed forest',
        'Natural grassland and sparsely vegetated areas',
        'Moors, heathland and sclerophyllous vegetation',
        'Transitional woodland, shrub',
        'Beaches, dunes, sands',
        'Inland wetlands',
        'Coastal wetlands',
        'Inland waters',
        'Marine waters',
    ],
    43: [
        'Continuous urban fabric',
        'Discontinuous urban fabric',
        'Industrial or commercial units',
        'Road and rail networks and associated land',
        'Port areas',
        'Airports',
        'Mineral extraction sites',
        'Dump sites',
        'Construction sites',
        'Green urban areas',
        'Sport and leisure facilities',
        'Non-irrigated arable land',
        'Permanently irrigated land',
        'Rice fields',
        'Vineyards',
        'Fruit trees and berry plantations',
        'Olive groves',
        'Pastures',
        'Annual crops associated with permanent crops',
        'Complex cultivation patterns',
        'Land principally occupied by agriculture, with significant areas of natural vegetation',
        'Agro-forestry areas',
        'Broad-leaved forest',
        'Coniferous forest',
        'Mixed forest',
        'Natural grassland',
        'Moors and heathland',
        'Sclerophyllous vegetation',
        'Transitional woodland/shrub',
        'Beaches, dunes, sands',
        'Bare rock',
        'Sparsely vegetated areas',
        'Burnt areas',
        'Inland marshes',
        'Peatbogs',
        'Salt marshes',
        'Salines',
        'Intertidal flats',
        'Water courses',
        'Water bodies',
        'Coastal lagoons',
        'Estuaries',
        'Sea and ocean',
    ],
}
81
-
82
# Collapses the 43-class CLC index space onto the 19-class nomenclature.
# Keys are indices into class_sets[43]; values are indices into class_sets[19].
# CLC classes with no 19-class counterpart (e.g. 3-10, 30, 32, 37) are
# deliberately absent, so lookups for them must use .get() and drop Nones.
label_converter = {
    0: 0, 1: 0,            # urban fabric (continuous + discontinuous)
    2: 1,                  # industrial or commercial units
    11: 2, 12: 2, 13: 2,   # arable land
    14: 3, 15: 3, 16: 3, 18: 3,  # permanent crops
    17: 4,                 # pastures
    19: 5,                 # complex cultivation patterns
    20: 6,                 # agriculture w/ natural vegetation
    21: 7,                 # agro-forestry areas
    22: 8,                 # broad-leaved forest
    23: 9,                 # coniferous forest
    24: 10,                # mixed forest
    25: 11, 31: 11,        # natural grassland / sparsely vegetated
    26: 12, 27: 12,        # moors, heathland, sclerophyllous vegetation
    28: 13,                # transitional woodland, shrub
    29: 14,                # beaches, dunes, sands
    33: 15, 34: 15,        # inland wetlands
    35: 16, 36: 16,        # coastal wetlands
    38: 17, 39: 17,        # inland waters
    40: 18, 41: 18, 42: 18,  # marine waters
}
116
-
117
# Per-band normalization statistics (dataset-wide mean / std).
# Sentinel-2: 12 optical bands, ordered as in metadata["s2c"]["bands"].
S2_MEAN = [
    752.40087073, 884.29673756, 1144.16202635, 1297.47289228,
    1624.90992062, 2194.6423161, 2422.21248945, 2517.76053101,
    2581.64687018, 2645.51888987, 2368.51236873, 1805.06846033,
]
S2_STD = [
    1108.02887453, 1155.15170768, 1183.6292542, 1368.11351514,
    1370.265037, 1355.55390699, 1416.51487101, 1474.78900051,
    1439.3086061, 1582.28010962, 1455.52084939, 1343.48379601,
]

# Sentinel-1: VV and VH backscatter (dB).
S1_MEAN = [-12.54847273, -20.19237134]
S1_STD = [5.25697717, 5.91150917]

# Archive part suffixes: "aa".."az" followed by "ba".."bh" (34 parts total).
parts = (
    [f"a{letter}" for letter in string.ascii_lowercase]
    + [f"b{letter}" for letter in string.ascii_lowercase[:8]]
)
125
-
126
class BigEarthNetDataset(datasets.GeneratorBasedBuilder):
    """Builder for the BigEarthNet Sentinel-1/Sentinel-2 dataset.

    Each example pairs a 12-band Sentinel-2 optical patch with a 2-band
    Sentinel-1 radar patch and a 19-class multi-hot land-cover label.
    Labels on disk use the original 43-class CLC nomenclature and are
    collapsed to 19 classes via the module-level ``label_converter``.
    """

    VERSION = datasets.Version("1.0.0")

    # The dataset archive is shipped as split parts that must be
    # concatenated back into a single tar.gz before extraction.
    DATA_URL = [
        f"https://huggingface.co/datasets/GFM-Bench/BigEarthNet/resolve/main/data/bigearthnet_part_{part}"
        for part in parts
    ]

    # Static sensor metadata: band names, central wavelengths (nm for S2;
    # the S1 values mirror the upstream convention), and normalization stats.
    metadata = {
        "s2c": {
            "bands": ["B1", "B2", "B3", "B4", "B5", "B6", "B7", "B8", "B8A", "B9", "B11", "B12"],
            "channel_wv": [442.7, 492.4, 559.8, 664.6, 704.1, 740.5, 782.8, 832.8, 864.7, 945.1, 1613.7, 2202.4],
            "mean": S2_MEAN,
            "std": S2_STD,
        },
        "s1": {
            "bands": ["VV", "VH"],
            "channel_wv": [5500, 5700],
            "mean": S1_MEAN,
            "std": S1_STD,
        },
    }

    SIZE = HEIGHT = WIDTH = 120  # patches are 120x120 pixels

    NUM_CLASSES = 19  # collapsed 19-class label set

    spatial_resolution = 10  # metres per pixel

    def __init__(self, *args, **kwargs):
        # Map each 43-class name to its index; label JSON files store class
        # names under the 43-class nomenclature.
        self.class2idx = {c: i for i, c in enumerate(class_sets[43])}

        super().__init__(*args, **kwargs)

    def _info(self):
        """Return DatasetInfo; dataset metadata is JSON-encoded in the description."""
        # Copy so we do not mutate the class-level ``metadata`` dict shared
        # by every instance (the original wrote into it in place).
        metadata = dict(self.metadata)
        metadata['size'] = self.SIZE
        metadata['num_classes'] = self.NUM_CLASSES
        metadata['spatial_resolution'] = self.spatial_resolution
        return datasets.DatasetInfo(
            description=json.dumps(metadata),
            features=datasets.Features({
                "optical": datasets.Array3D(shape=(12, self.HEIGHT, self.WIDTH), dtype="float32"),
                "radar": datasets.Array3D(shape=(2, self.HEIGHT, self.WIDTH), dtype="float32"),
                "optical_channel_wv": datasets.Sequence(datasets.Value("float32")),
                "radar_channel_wv": datasets.Sequence(datasets.Value("float32")),
                "label": datasets.Sequence(datasets.Value("float32"), length=self.NUM_CLASSES),
                "spatial_resolution": datasets.Value("int32"),
            }),
        )

    def _split_generators(self, dl_manager):
        """Download the split archive parts, reassemble, extract, and declare splits."""
        if isinstance(self.DATA_URL, list):
            try:
                downloaded_files = dl_manager.download(self.DATA_URL)
                # Concatenate the parts back into one tar.gz, then extract it.
                combined_file = os.path.join(dl_manager.download_config.cache_dir, "combined.tar.gz")
                with open(combined_file, 'wb') as outfile:
                    for part_file in downloaded_files:
                        with open(part_file, 'rb') as infile:
                            shutil.copyfileobj(infile, outfile)
                data_dir = dl_manager.extract(combined_file)
                os.remove(combined_file)
            except Exception:
                # Best-effort fallback kept from the original code, but no
                # longer a bare ``except`` (which also swallowed
                # KeyboardInterrupt/SystemExit). NOTE(review): a None
                # data_dir will still fail later in _generate_examples.
                data_dir = None
        else:
            data_dir = dl_manager.download_and_extract(self.DATA_URL)

        return [
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={
                    "split": 'train',
                    "data_dir": data_dir,
                },
            ),
            datasets.SplitGenerator(
                name="val",
                gen_kwargs={
                    "split": 'val',
                    "data_dir": data_dir,
                },
            ),
            datasets.SplitGenerator(
                name="test",
                gen_kwargs={
                    "split": 'test',
                    "data_dir": data_dir,
                },
            )
        ]

    def _generate_examples(self, split, data_dir):
        """Yield (key, example) pairs for ``split`` from the extracted archive.

        Reads metadata.csv, filters to the requested split, and loads the
        optical/radar tiffs plus the multi-hot label per row.
        """
        optical_channel_wv = np.array(self.metadata["s2c"]["channel_wv"])
        radar_channel_wv = np.array(self.metadata["s1"]["channel_wv"])
        spatial_resolution = self.spatial_resolution

        data_dir = os.path.join(data_dir, "BigEarthNet")
        metadata = pd.read_csv(os.path.join(data_dir, "metadata.csv"))
        metadata = metadata[metadata["split"] == split].reset_index(drop=True)

        for index, row in metadata.iterrows():
            optical_path = os.path.join(data_dir, row.optical_path)
            optical = self._read_image(optical_path).astype(np.float32)  # CxHxW

            radar_path = os.path.join(data_dir, row.radar_path)
            radar = self._read_image(radar_path).astype(np.float32)

            label_path = os.path.join(data_dir, row.label_path)
            label = self._load_label(label_path)

            sample = {
                "optical": optical,
                "radar": radar,
                "optical_channel_wv": optical_channel_wv,
                "radar_channel_wv": radar_channel_wv,
                "label": label,
                "spatial_resolution": spatial_resolution,
            }

            yield f"{index}", sample

    def _load_label(self, label_path):
        """Load a 43-class label JSON and return a 19-class multi-hot vector.

        Class names are mapped to 43-class indices via ``self.class2idx``,
        then collapsed to 19-class indices via ``label_converter``; classes
        with no 19-class counterpart are dropped.
        """
        with open(label_path) as f:
            labels = json.load(f)['labels']
        indices = [self.class2idx[label] for label in labels]
        indices_optional = [label_converter.get(idx) for idx in indices]
        indices = [idx for idx in indices_optional if idx is not None]
        # Was hard-coded to 19; use NUM_CLASSES so the two stay in sync.
        label = np.zeros(self.NUM_CLASSES, dtype=np.int64)
        label[indices] = 1
        return label

    def _read_image(self, image_path):
        """Read tiff image from image_path
        Args:
            image_path:
                Image path to read from

        Return:
            image:
                C, H, W numpy array image
        """
        # tifffile returns HxWxC; transpose to channels-first.
        image = tifffile.imread(image_path)
        image = np.transpose(image, (2, 0, 1))

        return image