| import os |
| import json |
| import shutil |
| import string |
| import tifffile |
| import datasets |
|
|
| import numpy as np |
| import pandas as pd |
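| # Example usage (a minimal sketch, not part of the loader itself): it assumes this |
| # script is distributed with the "GFM-Bench/BigEarthNet" dataset repo referenced by |
| # DATA_URL below and that remote code execution is enabled when loading. |
| # |
| #     import datasets |
| #     ds = datasets.load_dataset("GFM-Bench/BigEarthNet", split="train", trust_remote_code=True) |
| #     sample = ds[0] |
| #     optical = sample["optical"]  # (12, 120, 120) float32 Sentinel-2 patch |
| #     radar = sample["radar"]      # (2, 120, 120) float32 Sentinel-1 patch |
| #     label = sample["label"]      # 19-dimensional multi-hot land-cover vector |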
|
|
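| # BigEarthNet class nomenclatures: the 19-class set used as the prediction target and |
| # the original 43-class (CORINE Land Cover derived) set that appears in the label JSON files. |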
| class_sets = { |
| 19: [ |
| 'Urban fabric', |
| 'Industrial or commercial units', |
| 'Arable land', |
| 'Permanent crops', |
| 'Pastures', |
| 'Complex cultivation patterns', |
| 'Land principally occupied by agriculture, with significant areas of' |
| ' natural vegetation', |
| 'Agro-forestry areas', |
| 'Broad-leaved forest', |
| 'Coniferous forest', |
| 'Mixed forest', |
| 'Natural grassland and sparsely vegetated areas', |
| 'Moors, heathland and sclerophyllous vegetation', |
| 'Transitional woodland, shrub', |
| 'Beaches, dunes, sands', |
| 'Inland wetlands', |
| 'Coastal wetlands', |
| 'Inland waters', |
| 'Marine waters', |
| ], |
| 43: [ |
| 'Continuous urban fabric', |
| 'Discontinuous urban fabric', |
| 'Industrial or commercial units', |
| 'Road and rail networks and associated land', |
| 'Port areas', |
| 'Airports', |
| 'Mineral extraction sites', |
| 'Dump sites', |
| 'Construction sites', |
| 'Green urban areas', |
| 'Sport and leisure facilities', |
| 'Non-irrigated arable land', |
| 'Permanently irrigated land', |
| 'Rice fields', |
| 'Vineyards', |
| 'Fruit trees and berry plantations', |
| 'Olive groves', |
| 'Pastures', |
| 'Annual crops associated with permanent crops', |
| 'Complex cultivation patterns', |
| 'Land principally occupied by agriculture, with significant areas of' |
| ' natural vegetation', |
| 'Agro-forestry areas', |
| 'Broad-leaved forest', |
| 'Coniferous forest', |
| 'Mixed forest', |
| 'Natural grassland', |
| 'Moors and heathland', |
| 'Sclerophyllous vegetation', |
| 'Transitional woodland/shrub', |
| 'Beaches, dunes, sands', |
| 'Bare rock', |
| 'Sparsely vegetated areas', |
| 'Burnt areas', |
| 'Inland marshes', |
| 'Peatbogs', |
| 'Salt marshes', |
| 'Salines', |
| 'Intertidal flats', |
| 'Water courses', |
| 'Water bodies', |
| 'Coastal lagoons', |
| 'Estuaries', |
| 'Sea and ocean', |
| ], |
| } |
|
|
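| # Mapping from 43-class indices to 19-class indices. Classes with no 19-class |
| # counterpart are absent from this dict and get dropped in _load_label(). |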
| label_converter = { |
| 0: 0, |
| 1: 0, |
| 2: 1, |
| 11: 2, |
| 12: 2, |
| 13: 2, |
| 14: 3, |
| 15: 3, |
| 16: 3, |
| 18: 3, |
| 17: 4, |
| 19: 5, |
| 20: 6, |
| 21: 7, |
| 22: 8, |
| 23: 9, |
| 24: 10, |
| 25: 11, |
| 31: 11, |
| 26: 12, |
| 27: 12, |
| 28: 13, |
| 29: 14, |
| 33: 15, |
| 34: 15, |
| 35: 16, |
| 36: 16, |
| 38: 17, |
| 39: 17, |
| 40: 18, |
| 41: 18, |
| 42: 18, |
| } |
|
|
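| # Per-band mean/std statistics for the Sentinel-2 and Sentinel-1 channels, |
| # exposed to consumers through the metadata dict below. |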
| S2_MEAN = [752.40087073, 884.29673756, 1144.16202635, 1297.47289228, 1624.90992062, 2194.6423161, 2422.21248945, 2517.76053101, 2581.64687018, 2645.51888987, 2368.51236873, 1805.06846033] |
| S2_STD = [1108.02887453, 1155.15170768, 1183.6292542, 1368.11351514, 1370.265037, 1355.55390699, 1416.51487101, 1474.78900051, 1439.3086061, 1582.28010962, 1455.52084939, 1343.48379601] |
|
|
| S1_MEAN = [-12.54847273, -20.19237134] |
| S1_STD = [5.25697717, 5.91150917] |
|
|
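| # The archive is split into 34 parts whose filenames end in "aa".."az" and "ba".."bh". |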
| parts = [f"a{letter}" for letter in string.ascii_lowercase] |
| parts.extend([f"b{letter}" for letter in string.ascii_lowercase[:8]]) |
|
|
| class BigEarthNetDataset(datasets.GeneratorBasedBuilder): |
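| """BigEarthNet loader with paired Sentinel-2 (12-band) and Sentinel-1 (VV/VH) patches |
| and 19-class multi-label land-cover annotations.""" |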
| VERSION = datasets.Version("1.0.0") |
|
|
| DATA_URL = [ |
| f"https://huggingface.co/datasets/GFM-Bench/BigEarthNet/resolve/main/data/bigearthnet_part_{part}" |
| for part in parts |
| ] |
|
|
| metadata = { |
| "s2c": { |
| "bands": ["B1", "B2", "B3", "B4", "B5", "B6", "B7", "B8", "B8A", "B9", "B11", "B12"], |
| "channel_wv": [442.7, 492.4, 559.8, 664.6, 704.1, 740.5, 782.8, 832.8, 864.7, 945.1, 1613.7, 2202.4], |
| "mean": S2_MEAN, |
| "std": S2_STD |
| }, |
| "s1": { |
| "bands": ["VV", "VH"], |
| "channel_wv": [5500, 5700], |
| "mean": S1_MEAN, |
| "std": S1_STD |
| } |
| } |
|
|
| SIZE = HEIGHT = WIDTH = 120 |
|
|
| NUM_CLASSES = 19 |
|
|
| spatial_resolution = 10 |
|
|
| def __init__(self, *args, **kwargs): |
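| # Lookup from class name to 43-class index; _load_label() remaps these to the 19-class set. |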
| self.class2idx = {c: i for i, c in enumerate(class_sets[43])} |
|
|
| super().__init__(*args, **kwargs) |
|
|
| def _info(self): |
| metadata = self.metadata |
| metadata['size'] = self.SIZE |
| metadata['num_classes'] = self.NUM_CLASSES |
| metadata['spatial_resolution'] = self.spatial_resolution |
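| # The metadata dict is serialized into the description field so consumers can recover |
| # band names, wavelengths, normalization statistics, image size, and resolution. |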
| return datasets.DatasetInfo( |
| description=json.dumps(metadata), |
| features=datasets.Features({ |
| "optical": datasets.Array3D(shape=(12, self.HEIGHT, self.WIDTH), dtype="float32"), |
| "radar": datasets.Array3D(shape=(2, self.HEIGHT, self.WIDTH), dtype="float32"), |
| "optical_channel_wv": datasets.Sequence(datasets.Value("float32")), |
| "radar_channel_wv": datasets.Sequence(datasets.Value("float32")), |
| "label": datasets.Sequence(datasets.Value("float32"), length=self.NUM_CLASSES), |
| "spatial_resolution": datasets.Value("int32"), |
| }), |
| ) |
|
|
| def _split_generators(self, dl_manager): |
| if isinstance(self.DATA_URL, list): |
| try: |
| print("Downloading data files from HF") |
| downloaded_files = dl_manager.download(self.DATA_URL) |
| print("Downloading Finished") |
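| # The tar.gz archive is shipped as byte-level parts; concatenating them in order |
| # restores the original archive before extraction. |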
| combined_file = os.path.join(dl_manager.download_config.cache_dir, "combined.tar.gz") |
| with open(combined_file, 'wb') as outfile: |
| for counter, part_file in enumerate(downloaded_files): |
| print(f"copying file {counter + 1}/{len(downloaded_files)}") |
| with open(part_file, 'rb') as infile: |
| shutil.copyfileobj(infile, outfile) |
| data_dir = dl_manager.extract(combined_file) |
| os.remove(combined_file) |
| except Exception as e: |
| |
| print(f"An error occurred: {e}, setting data_dir to None") |
| data_dir = None |
| else: |
| data_dir = dl_manager.download_and_extract(self.DATA_URL) |
|
|
| return [ |
| datasets.SplitGenerator( |
| name="train", |
| gen_kwargs={ |
| "split": 'train', |
| "data_dir": data_dir, |
| }, |
| ), |
| datasets.SplitGenerator( |
| name="val", |
| gen_kwargs={ |
| "split": 'val', |
| "data_dir": data_dir, |
| }, |
| ), |
| datasets.SplitGenerator( |
| name="test", |
| gen_kwargs={ |
| "split": 'test', |
| "data_dir": data_dir, |
| }, |
| ) |
| ] |
|
|
| def _generate_examples(self, split, data_dir): |
| optical_channel_wv = np.array(self.metadata["s2c"]["channel_wv"]) |
| radar_channel_wv = np.array(self.metadata["s1"]["channel_wv"]) |
| spatial_resolution = self.spatial_resolution |
|
|
| data_dir = os.path.join(data_dir, "BigEarthNet") |
| metadata = pd.read_csv(os.path.join(data_dir, "metadata.csv")) |
| metadata = metadata[metadata["split"] == split].reset_index(drop=True) |
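| # metadata.csv holds one row per patch with its split and the relative paths to the |
| # Sentinel-2 tiff, the Sentinel-1 tiff, and the label JSON. |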
|
|
| for index, row in metadata.iterrows(): |
| optical_path = os.path.join(data_dir, row.optical_path) |
| optical = self._read_image(optical_path).astype(np.float32) |
|
|
| radar_path = os.path.join(data_dir, row.radar_path) |
| radar = self._read_image(radar_path).astype(np.float32) |
|
|
| label_path = os.path.join(data_dir, row.label_path) |
| label = self._load_label(label_path) |
|
|
| sample = { |
| "optical": optical, |
| "radar": radar, |
| "optical_channel_wv": optical_channel_wv, |
| "radar_channel_wv": radar_channel_wv, |
| "label": label, |
| "spatial_resolution": spatial_resolution, |
| } |
|
|
| yield f"{index}", sample |
| |
| def _load_label(self, label_path): |
| """Read the 43-class label names from a JSON file and convert them to a 19-class multi-hot vector.""" |
| with open(label_path) as f: |
| labels = json.load(f)['labels'] |
| # Map class names to 43-class indices, then remap to the 19-class set; |
| # classes without a 19-class counterpart are dropped. |
| indices = [self.class2idx[label] for label in labels] |
| indices_optional = [label_converter.get(idx) for idx in indices] |
| indices = [idx for idx in indices_optional if idx is not None] |
| label = np.zeros(self.NUM_CLASSES, dtype=np.int64) |
| label[indices] = 1 |
| return label |
| |
| def _read_image(self, image_path): |
| """Read a TIFF image from image_path. |
| |
| Args: |
| image_path: Path of the TIFF file to read. |
| |
| Returns: |
| image: Numpy array of shape (C, H, W). |
| """ |
| # Transpose from channel-last (H, W, C) to channel-first (C, H, W). |
| image = tifffile.imread(image_path) |
| image = np.transpose(image, (2, 0, 1)) |
|
|
| return image |