| """ |
| |
| This script contains helper functions to process the AGBD Dataset hosted on HuggingFace at: https://huggingface.co/datasets/prs-eth/AGBD. |
| |
| """ |
|
|
|
|
| |
| |
|
|
| import numpy as np |
| from datasets import Value |
| import pickle |
| import pandas as pd |
|
|
| |
| |
|
|
| |
# Explicit HuggingFace `datasets` dtypes for the scalar metadata fields.
feature_dtype = {
    name: Value(dtype)
    for name, dtype in (
        ('s2_num_days', 'int16'),
        ('gedi_num_days', 'uint16'),
        ('lat', 'float32'),
        ('lon', 'float32'),
        ('agbd_se', 'float32'),
        ('elev_lowes', 'float32'),
        ('leaf_off_f', 'uint8'),
        ('pft_class', 'uint8'),
        ('region_cla', 'uint8'),
        ('rh98', 'float32'),
        ('sensitivity', 'float32'),
        ('solar_elev', 'float32'),
        ('urban_prop', 'uint8'),
    )
}
|
|
| |
# Position of each Sentinel-2 band within the patch channel stack.
s2_bands_idx = {
    band: idx
    for idx, band in enumerate(
        ('B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09', 'B11', 'B12')
    )
}
|
|
| |
# Pre-computed normalization statistics for all modalities, loaded once at import.
# NOTE(review): `statistics.pkl` must exist in the working directory — TODO confirm
# this script is always run from the dataset root.
with open('statistics.pkl', 'rb') as stats_file:
    norm_values = pickle.load(stats_file)
|
|
| |
# Per-modality nodata markers used to mask invalid pixels before normalization.
NODATAVALS = dict(S2_bands=0, CH=255, ALOS_bands=-9999.0, DEM=-9999, LC=255)
|
|
| |
| REF_BIOMES = {20: 'Shrubs', 30: 'Herbaceous vegetation', 40: 'Cultivated', 90: 'Herbaceous wetland', 111: 'Closed-ENL', 112: 'Closed-EBL', 114: 'Closed-DBL', 115: 'Closed-mixed', 116: 'Closed-other', 121: 'Open-ENL', 122: 'Open-EBL', 124: 'Open-DBL', 125: 'Open-mixed', 126: 'Open-other'} |
| _biome_values_mapping = {v: i for i, v in enumerate(REF_BIOMES.keys())} |
|
|
| |
| |
|
|
def normalize_data(data, norm_values, norm_strat, nodata_value = None) :
    """
    Normalize the data, according to various strategies:
    - mean_std: subtract the mean and divide by the standard deviation
    - pct: subtract the 1st percentile, divide by the (99th - 1st) percentile
      range, and clip the result to [0, 1]
    - min_max: subtract the minimum and divide by the (max - min) range

    Args:
    - data (np.array): the data to normalize
    - norm_values (dict): the normalization values ('mean'/'std', 'p1'/'p99',
      or 'min'/'max' depending on the strategy)
    - norm_strat (str): the normalization strategy ('mean_std', 'pct', 'min_max')
    - nodata_value (int/float, optional): cells equal to this value are set to 0
      instead of being normalized

    Returns:
    - normalized_data (np.array): the normalized data

    Raises:
    - ValueError: if `norm_strat` is not a recognized strategy
    """

    if norm_strat == 'mean_std' :
        mean, std = norm_values['mean'], norm_values['std']
        if nodata_value is not None :
            data = np.where(data == nodata_value, 0, (data - mean) / std)
        else :
            data = (data - mean) / std

    elif norm_strat == 'pct' :
        p1, p99 = norm_values['p1'], norm_values['p99']
        if nodata_value is not None :
            data = np.where(data == nodata_value, 0, (data - p1) / (p99 - p1))
        else :
            data = (data - p1) / (p99 - p1)
        # FIX: clip on both paths so out-of-range values cannot escape [0, 1] when a
        # nodata value is used; the nodata placeholder 0 is unaffected by the clip.
        data = np.clip(data, 0, 1)

    elif norm_strat == 'min_max' :
        min_val, max_val = norm_values['min'], norm_values['max']
        if nodata_value is not None :
            data = np.where(data == nodata_value, 0, (data - min_val) / (max_val - min_val))
        else:
            data = (data - min_val) / (max_val - min_val)

    else:
        raise ValueError(f'Normalization strategy `{norm_strat}` is not valid.')

    return data
|
|
|
|
def normalize_bands(bands_data, norm_values, order, norm_strat, nodata_value = None, band_axis = 2) :
    """
    Normalize a multi-band array in place, one band at a time, using each band's
    own normalization statistics.

    Args:
    - bands_data (np.array): the bands data to normalize; the band dimension sits
      at `band_axis` (default 2, i.e. the original (H, W, C) channels-last layout).
      NOTE(review): the array is modified in place; it should have a float dtype
      so normalized values are not truncated on assignment — TODO confirm callers.
    - norm_values (dict): per-band normalization values, keyed by band name
    - order (list): the band names, in the order they appear along `band_axis`
    - norm_strat (str): the normalization strategy (see `normalize_data`)
    - nodata_value (int/float): the nodata value
    - band_axis (int): axis holding the bands; added so channels-first data can be
      handled too (the default preserves the original behavior)

    Returns:
    - bands_data (np.array): the normalized bands data (same object, mutated)
    """

    for i, band in enumerate(order) :
        # Build a selector equivalent to [:, :, i] generalized to any band axis.
        selector = [slice(None)] * bands_data.ndim
        selector[band_axis] = i
        selector = tuple(selector)
        bands_data[selector] = normalize_data(bands_data[selector], norm_values[band], norm_strat, nodata_value)

    return bands_data
|
|
|
|
def one_hot(x) :
    """
    Return a one-hot vector over the reference biomes for land-cover code `x`.
    Codes absent from `_biome_values_mapping` fall back to index 0.
    """
    encoding = np.zeros(len(_biome_values_mapping))
    position = _biome_values_mapping.get(x, 0)
    encoding[position] = 1
    return encoding
|
|
def encode_biome(lc, encode_strat, embeddings = None) :
    """
    Encode the land cover data with one of three strategies: sin/cosine encoding,
    cat2vec embeddings, or one-hot encoding.

    Args:
    - lc (np.array): the land cover data
    - encode_strat (str): the encoding strategy ('sin_cos', 'cat2vec', 'onehot')
    - embeddings (dict): the cat2vec embeddings (required for 'cat2vec')

    Returns:
    - encoded_lc (np.array): the encoded land cover data (float32), with the
      encoding dimension appended as the last axis

    Raises:
    - ValueError: if `encode_strat` is not a recognized strategy
    """

    if encode_strat == 'sin_cos' :
        # Map the class code onto the unit circle, rescale to [0, 1], and zero out
        # nodata pixels.
        invalid = (lc == NODATAVALS['LC'])
        angle = 2 * np.pi * lc / 201
        lc_cos = np.where(invalid, 0, (np.cos(angle) + 1) / 2)
        lc_sin = np.where(invalid, 0, (np.sin(angle) + 1) / 2)
        return np.stack([lc_cos, lc_sin], axis = -1).astype(np.float32)

    if encode_strat == 'cat2vec' :
        # Unknown codes fall back to the embedding of code 0.
        lookup = np.vectorize(lambda code: embeddings.get(code, embeddings.get(0)), signature = '()->(n)')
        return lookup(lc).astype(np.float32)

    if encode_strat == 'onehot' :
        return np.vectorize(one_hot, signature = '() -> (n)')(lc).astype(np.float32)

    raise ValueError(f'Encoding strategy `{encode_strat}` is not valid.')
|
|
|
|
def compute_num_features(input_features, encode_strat) :
    """
    Compute the number of feature channels the model will receive. The counts
    mirror the channel layout assembled by `process_batch`.

    Args:
    - input_features (dict): the input features configuration; 'S2_bands' is a
      list of band names, the remaining entries ('S2_dates', 'lat_lon',
      'GEDI_dates', 'ALOS', 'CH', 'LC', 'DEM', 'topo') are booleans
    - encode_strat (str): the land-cover encoding strategy ('sin_cos',
      'cat2vec', 'onehot', or 'none')

    Returns:
    - num_features (int): the number of features
    """

    num_features = len(input_features['S2_bands'])
    if input_features['S2_dates'] : num_features += 3
    if input_features['lat_lon'] : num_features += 4
    if input_features['GEDI_dates'] : num_features += 3
    if input_features['ALOS'] : num_features += 2
    if input_features['CH'] : num_features += 2
    if input_features['LC'] :
        num_features += 1  # urban proportion channel
        if encode_strat == 'sin_cos' : num_features += 2
        elif encode_strat == 'cat2vec' : num_features += 5
        elif encode_strat == 'onehot' : num_features += len(REF_BIOMES)
        # FIX: with encode_strat == 'none', process_batch still emits the raw
        # land-cover channel; it was previously left out of the count.
        else : num_features += 1
    if input_features['DEM'] : num_features += 1
    if input_features['topo'] : num_features += 3

    return num_features
|
|
|
|
def load_embeddings(user_config) :
    """
    Attach the cat2vec land-cover embeddings to the user configuration.

    When the encoding strategy is 'cat2vec', reads `embeddings_train.csv` and
    stores a {land-cover code -> 5-dim embedding vector} dictionary under the
    'embeddings' key; otherwise 'embeddings' is set to None.

    Args:
    - user_config (dict): the configuration, containing at least 'encode_strat'

    Returns:
    - user_config (dict): the same configuration, with 'embeddings' populated
    """

    if user_config['encode_strat'] != 'cat2vec' :
        user_config['embeddings'] = None
        return user_config

    table = pd.read_csv("embeddings_train.csv")
    dim_columns = ['dim0', 'dim1', 'dim2', 'dim3', 'dim4']
    user_config['embeddings'] = {
        code: vector for code, vector in zip(table['mapping'], table[dim_columns].to_numpy())
    }

    return user_config
|
|
|
|
| |
| |
|
|
def process_batch(batch, norm_strat, encode_strat, input_features, metadata, patch_size, embeddings):
    """
    This function processes a batch of data from the HuggingFace AGBD dataset according to the user-defined configuration.

    Args:
    - batch (dict): the batch of data from the dataset, with 'input' (array-like of
      shape (batch, channels, height, width)), 'label' and 'metadata' entries
    - norm_strat (str): the normalization strategy ('mean_std', 'pct', 'min_max', 'none')
    - encode_strat (str): the encoding strategy for land cover data
    - input_features (dict): the input features configuration
    - metadata (list): the metadata variables to return
    - patch_size (int): the size of the patches
    - embeddings (dict): the cat2vec embeddings for land cover data

    Returns:
    - processed_batch (dict): the processed batch of data with normalized and concatenated features
    """

    # Patches are channels-first: (batch, channels, height, width).
    patches = np.asarray(batch["input"])
    batch_size = patches.shape[0]
    og_patch_size = patches.shape[-1]

    # Allocate the output feature stack and fill it group by group.
    num_features = compute_num_features(input_features, encode_strat)
    out_patch = np.zeros((batch_size, num_features, og_patch_size, og_patch_size), dtype = np.float32)
    current_idx = 0

    # Sentinel-2 reflectance bands (channels 0-11), each normalized with its own
    # statistics. FIX: the previous code passed the full 12-band order to
    # normalize_bands even when a subset of bands was selected (misaligning each
    # band with its statistics), and normalize_bands indexes the band axis at
    # position 2 — the height axis for these channels-first patches.
    for offset, band in enumerate(input_features['S2_bands']):
        band_data = patches[:, s2_bands_idx[band]]
        if norm_strat == 'none':
            out_patch[:, current_idx + offset] = band_data
        else:
            out_patch[:, current_idx + offset] = normalize_data(band_data, norm_values['S2_bands'][band], norm_strat, NODATAVALS['S2_bands'])
    current_idx += len(input_features['S2_bands'])

    # Sentinel-2 date channels (12-14): channel 12 is normalized, 13-14 pass through.
    if input_features['S2_dates'] :
        out_patch[:, current_idx : current_idx + 3] = patches[:, 12:15] if norm_strat == 'none' else np.stack([
            normalize_data(patches[:, 12], norm_values['Sentinel_metadata']['S2_date'], 'min_max' if norm_strat == 'pct' else norm_strat),
            patches[:, 13],
            patches[:, 14]
        ], axis = 1)
        current_idx += 3

    # Latitude/longitude channels (15-18), copied through unnormalized.
    if input_features['lat_lon'] :
        out_patch[:, current_idx : current_idx + 4] = patches[:, 15:19]
        current_idx += 4

    # GEDI date channels (19-21): channel 19 is normalized, 20-21 pass through.
    if input_features['GEDI_dates'] :
        out_patch[:, current_idx : current_idx + 3] = patches[:, 19:22] if norm_strat == 'none' else np.stack([
            normalize_data(patches[:, 19], norm_values['GEDI']['date'], 'min_max' if norm_strat == 'pct' else norm_strat),
            patches[:, 20],
            patches[:, 21]
        ], axis = 1)
        current_idx += 3

    # ALOS channels (22-23), HH and HV polarizations.
    if input_features['ALOS'] :
        if norm_strat == 'none':
            out_patch[:, current_idx : current_idx + 2] = patches[:, 22:24]
        else:
            # FIX: normalize each polarization directly; normalize_bands assumes a
            # channels-last layout that does not match these channels-first patches.
            for offset, pol in enumerate(['HH', 'HV']):
                out_patch[:, current_idx + offset] = normalize_data(patches[:, 22 + offset], norm_values['ALOS_bands'][pol], norm_strat, NODATAVALS['ALOS_bands'])
        current_idx += 2

    # Canopy height and its standard deviation (channels 24-25).
    if input_features['CH'] :
        out_patch[:, current_idx] = patches[:, 24] if norm_strat == 'none' else normalize_data(patches[:, 24], norm_values['CH']['ch'], norm_strat, NODATAVALS['CH'])
        out_patch[:, current_idx + 1] = patches[:, 25] if norm_strat == 'none' else normalize_data(patches[:, 25], norm_values['CH']['std'], norm_strat, NODATAVALS['CH'])
        current_idx += 2

    # Land cover class (channel 26) and urban proportion (channel 27).
    if input_features['LC'] :

        if encode_strat != 'none' :
            lc_patch = np.vectorize(lambda x: encode_biome(x, encode_strat, embeddings), signature = '()->(n)')(patches[:, 26])
            # lc_patch is (batch, H, W, n); bring the encoding axis next to batch.
            # FIX: swapaxes(-1, 1) also swapped the H and W axes, transposing the
            # encoded map relative to the other channels; moveaxis keeps orientation.
            out_patch[:, current_idx : current_idx + lc_patch.shape[-1]] = np.moveaxis(lc_patch, -1, 1)
            current_idx += lc_patch.shape[-1]
        else:
            out_patch[:, current_idx] = patches[:, 26]
            current_idx += 1

        # Urban proportion is stored as a percentage; rescale to [0, 1].
        out_patch[:, current_idx] = patches[:, 27] / 100
        current_idx += 1

    # Topographic channels (28-30), copied through unnormalized.
    if input_features['topo'] :
        out_patch[:, current_idx : current_idx + 3] = patches[:, 28:31]
        current_idx += 3

    # Digital elevation model (channel 31).
    if input_features['DEM'] :
        out_patch[:, current_idx] = patches[:, 31] if norm_strat == 'none' else normalize_data(patches[:, 31], norm_values['DEM'], norm_strat, NODATAVALS['DEM'])
        current_idx += 1

    # Center-crop to the requested patch size.
    # FIX: the previous slice cropped axes 1 and 2 (features and height) instead of
    # the two spatial axes.
    start = (og_patch_size - patch_size) // 2
    out_patch = out_patch[:, :, start : start + patch_size, start : start + patch_size]

    # Keep only the requested metadata fields for each sample.
    if metadata == [] :
        out_metadata = [{} for _ in range(batch_size)]
    else:
        out_metadata = [
            {key: d[key] for key in metadata}
            for d in batch["metadata"]
        ]

    return {'input': out_patch, 'label': batch["label"], 'metadata': out_metadata}
|
|