"""
Utilities for loading and manipulating an HDF5 dataset optimized for ML.

Features:
- Fast extraction by class and by temporal period
- Creation of temporal sequences for LSTM/Transformer models
- Automatic data normalization
- Filtering by metadata (angle, resolution, etc.)
- Sliding-window extraction for training
- Filtering by mask values

Learning scenarios:
1. Temporal-stacking classification (group k-fold)
2. Temporal prediction with an LSTM (time series)
3. Domain adaptation: HH vs HV
4. Domain adaptation: PAZ vs TerraSAR-X
"""

import json
from typing import Dict, List, Optional, Tuple, Union

import h5py
import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm
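
# Minimal usage sketch (hedged: 'dataset.h5' and 'ABL001' are placeholder
# values; the HDF5 layout is assumed to match what this module reads):
#
#     with MLDatasetLoader('dataset.h5') as loader:
#         data = loader.load_data('ABL001', orbit='DSC', polarisation='HH')
#         windows, masks, positions = loader.extract_windows(
#             data['images'], data['masks'], window_size=64)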


class MLDatasetLoader:
    """Efficiently load the optimized HDF5 dataset, with window extraction."""

    def __init__(self, hdf5_path: str):
        """
        Args:
            hdf5_path: Path to the HDF5 file
        """
        self.hdf5_path = hdf5_path
        self.file = None
        self._load_metadata()

    def _load_metadata(self):
        """Load metadata into memory for fast access."""
        with h5py.File(self.hdf5_path, 'r') as f:
            meta = f['metadata']
            self.classes = json.loads(meta.attrs['classes'])
            self.n_groups = meta.attrs['n_total_groups']
            self.nodata = meta.attrs['nodata_value']

            # Per-class index, stored as a JSON attribute per class group.
            self.class_index = {}
            for class_name in f['index/by_class'].keys():
                entries_json = f[f'index/by_class/{class_name}'].attrs['entries_json']
                self.class_index[class_name] = json.loads(entries_json)

            temp_ranges_json = f['index/temporal_ranges'].attrs['ranges_json']
            self.temporal_ranges = json.loads(temp_ranges_json)

    def __enter__(self):
        """Context-manager entry."""
        self.file = h5py.File(self.hdf5_path, 'r')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit."""
        if self.file:
            self.file.close()

    def get_group_info(self, group_name: str) -> Dict:
        """Return the metadata of a group."""
        with h5py.File(self.hdf5_path, 'r') as f:
            if group_name not in f['data']:
                raise ValueError(f"Group {group_name} not found")

            group = f['data'][group_name]
            return {
                'class': group.attrs['class'],
                'latitude': group.attrs['latitude'],
                'longitude': group.attrs['longitude'],
                'elevation': group.attrs['elevation'],
                'orientation': group.attrs['orientation'],
                'slope': group.attrs['slope'],
                'orbits': list(group.keys())
            }
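
    # Example sketch ('ABL001' is a placeholder group name):
    #
    #     info = loader.get_group_info('ABL001')
    #     print(info['class'], info['orbits'])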

    def extract_windows(
        self,
        image: np.ndarray,
        mask: np.ndarray,
        window_size: int,
        stride: Optional[int] = None,
        max_mask_value: int = 3,
        max_mask_percentage: float = 100.0,
        min_valid_percentage: float = 50.0,
        skip_optim_offset: bool = False
    ) -> Tuple[Optional[np.ndarray], Optional[np.ndarray], List[Tuple[int, int]]]:
        """
        Extract windows from an image with mask filtering.
        The starting offset is optimized automatically to maximize the number
        of valid windows (unless skip_optim_offset is True).

        Args:
            image: Image of shape (H, W), (H, W, C), (H, W, T) or (H, W, C, T)
            mask: Mask of shape (H, W) or (H, W, T)
            window_size: Window size (square windows)
            stride: Stride step (if None, equals window_size: non-overlapping)
            max_mask_value: Maximum accepted mask value (0, 1, 2 or 3)
            max_mask_percentage: Max percentage of pixels with mask > max_mask_value
            min_valid_percentage: Min percentage of valid (non-nodata) pixels
            skip_optim_offset: If True, skip offset optimization and start at (0, 0)

        Returns:
            windows: Array of extracted windows, or None if no window qualifies
            window_masks: Array of corresponding masks, or None if no window qualifies
            positions: List of (y, x) positions, one per window
        """
        if stride is None:
            stride = window_size

        # Parse image/mask dimensionality. A 3-D image is treated as (H, W, C);
        # the validity checks below reduce over the trailing axes either way.
        if image.ndim == 2:
            h, w = image.shape
            has_channels = False
            has_time = False
        elif image.ndim == 3:
            h, w, c = image.shape
            has_channels = True
            has_time = False
        elif image.ndim == 4:
            h, w, c, t = image.shape
            has_channels = True
            has_time = True
        else:
            raise ValueError(f"Unsupported image ndim: {image.ndim}")

        if mask.ndim == 2:
            mask_has_time = False
        elif mask.ndim == 3:
            mask_has_time = True
        else:
            raise ValueError(f"Unsupported mask ndim: {mask.ndim}")

        if skip_optim_offset:
            # No offset search: extract from the top-left corner.
            best_start_y = 0
            best_start_x = 0
        else:
            def count_valid_windows(start_y, start_x):
                """Count the valid windows for a given starting offset."""
                count = 0
                for y in range(start_y, h - window_size + 1, stride):
                    for x in range(start_x, w - window_size + 1, stride):
                        if mask_has_time:
                            window_mask = mask[y:y+window_size, x:x+window_size, :]
                        else:
                            window_mask = mask[y:y+window_size, x:x+window_size]

                        # "Bad" pixels exceed max_mask_value (at any timestamp
                        # when the mask is temporal).
                        if mask_has_time:
                            bad_pixels = np.any(window_mask > max_mask_value, axis=-1)
                        else:
                            bad_pixels = window_mask > max_mask_value
                        bad_percentage = (np.sum(bad_pixels) / (window_size * window_size)) * 100.0

                        if image.ndim == 2:
                            window = image[y:y+window_size, x:x+window_size]
                        elif image.ndim == 3:
                            window = image[y:y+window_size, x:x+window_size, :]
                        else:  # image.ndim == 4
                            window = image[y:y+window_size, x:x+window_size, :, :]

                        # "Valid" pixels are neither nodata nor NaN across all
                        # channels and timestamps.
                        is_invalid = (window == self.nodata) | np.isnan(window)
                        if has_time:
                            valid_pixels = np.all(~is_invalid, axis=-1)
                            if has_channels:
                                valid_pixels = np.all(valid_pixels, axis=-1)
                        elif has_channels:
                            valid_pixels = np.all(~is_invalid, axis=-1)
                        else:
                            valid_pixels = ~is_invalid
                        valid_percentage = (np.sum(valid_pixels) / (window_size * window_size)) * 100.0

                        if bad_percentage <= max_mask_percentage and valid_percentage >= min_valid_percentage:
                            count += 1
                return count

            best_count = 0
            best_start_y = 0
            best_start_x = 0

            # Offsets beyond min(stride, window_size) only reproduce window
            # grids that have already been tested.
            max_offset = min(stride, window_size)

            offsets_to_test = []
            for start_y in range(max_offset):
                for start_x in range(max_offset):
                    if start_y + window_size <= h and start_x + window_size <= w:
                        offsets_to_test.append((start_y, start_x))

            # Evaluate all candidate offsets in parallel.
            counts = Parallel(n_jobs=-1)(
                delayed(count_valid_windows)(start_y, start_x)
                for start_y, start_x in tqdm(offsets_to_test, desc="Optimizing offset", leave=False)
            )

            if len(counts) > 0:
                best_idx = np.argmax(counts)
                best_start_y, best_start_x = offsets_to_test[best_idx]
                best_count = counts[best_idx]

        # Extract windows starting from the selected offset, applying the same
        # filtering criteria as count_valid_windows.
        windows = []
        window_masks = []
        positions = []

        for y in range(best_start_y, h - window_size + 1, stride):
            for x in range(best_start_x, w - window_size + 1, stride):
                if image.ndim == 2:
                    window = image[y:y+window_size, x:x+window_size]
                elif image.ndim == 3:
                    window = image[y:y+window_size, x:x+window_size, :]
                else:  # image.ndim == 4
                    window = image[y:y+window_size, x:x+window_size, :, :]

                if mask_has_time:
                    window_mask = mask[y:y+window_size, x:x+window_size, :]
                else:
                    window_mask = mask[y:y+window_size, x:x+window_size]

                if mask_has_time:
                    bad_pixels = np.any(window_mask > max_mask_value, axis=-1)
                else:
                    bad_pixels = window_mask > max_mask_value
                bad_percentage = (np.sum(bad_pixels) / (window_size * window_size)) * 100.0

                is_invalid = (window == self.nodata) | np.isnan(window)
                if has_time:
                    valid_pixels = np.all(~is_invalid, axis=-1)
                    if has_channels:
                        valid_pixels = np.all(valid_pixels, axis=-1)
                elif has_channels:
                    valid_pixels = np.all(~is_invalid, axis=-1)
                else:
                    valid_pixels = ~is_invalid
                valid_percentage = (np.sum(valid_pixels) / (window_size * window_size)) * 100.0

                if bad_percentage <= max_mask_percentage and valid_percentage >= min_valid_percentage:
                    windows.append(window.astype(np.float32))
                    window_masks.append(window_mask)
                    positions.append((y, x))

        if len(windows) == 0:
            return None, None, []

        return np.array(windows), np.array(window_masks), positions
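
    # Example sketch (hedged: parameter values are illustrative, and the input
    # shapes assume a (H, W, T) image with a matching (H, W, T) mask):
    #
    #     windows, wmasks, positions = loader.extract_windows(
    #         data['images'], data['masks'],
    #         window_size=64, stride=32,
    #         max_mask_value=1, max_mask_percentage=20.0,
    #         min_valid_percentage=80.0)
    #     # windows: (N, 64, 64, T) float32; positions: N (y, x) tuples.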

    def load_data(
        self,
        group_name: str,
        orbit: str = 'DSC',
        polarisation: Union[str, List[str]] = 'HH',
        start_date: Optional[str] = None,
        end_date: Optional[str] = None,
        normalize: bool = False,
        remove_nodata: bool = True,
        scale_type: str = 'intensity'
    ) -> Dict:
        """
        Load the data of a specific group.

        Args:
            group_name: Group name (e.g. 'ABL001')
            orbit: 'ASC' or 'DSC'
            polarisation: 'HH', 'HV', or ['HH', 'HV'] for dual-pol
            start_date: Start date (format 'YYYYMMDD')
            end_date: End date (format 'YYYYMMDD')
            normalize: If True, normalize with the pre-computed statistics
                (single-pol only)
            remove_nodata: If True, replace nodata values with NaN
            scale_type: 'intensity' (default), 'amplitude' (intensity ** 0.5),
                or 'log10' (log10 of intensity)

        Returns:
            Dict containing: images, masks, timestamps, angles_incidence,
            metadata, group, orbit
        """
        with h5py.File(self.hdf5_path, 'r') as f:
            if isinstance(polarisation, list):
                # Dual-pol: load both polarisations and keep only their
                # common timestamps. Exactly two polarisations are assumed.
                data_list = []
                for pol in polarisation:
                    path = f'data/{group_name}/{orbit}/{pol}'
                    if path not in f:
                        raise ValueError(f"Path {path} not found in dataset")
                    data_list.append(f[path])

                timestamps_hh = data_list[0]['timestamps'][:]
                timestamps_hv = data_list[1]['timestamps'][:]
                common_ts = np.intersect1d(timestamps_hh, timestamps_hv)

                if len(common_ts) == 0:
                    raise ValueError(f"No common timestamps between HH and HV for {group_name}")

                # Optional date filter. Timestamps are 'YYYYMMDD' byte strings,
                # so lexicographic comparison matches chronological order.
                if start_date or end_date:
                    mask_ts = np.ones(len(common_ts), dtype=bool)
                    if start_date:
                        mask_ts &= common_ts >= start_date.encode('utf-8')
                    if end_date:
                        mask_ts &= common_ts <= end_date.encode('utf-8')
                    common_ts = common_ts[mask_ts]

                    if len(common_ts) == 0:
                        raise ValueError("No data in specified date range")

                images_list = []
                masks_list = []

                # First pass: determine the smallest common spatial extent,
                # since the two polarisations can differ by a few pixels.
                min_h, min_w = None, None
                for pol, data_pol in zip(polarisation, data_list):
                    ts_pol = data_pol['timestamps'][:]
                    indices = [np.where(ts_pol == ts)[0][0] for ts in common_ts]
                    img_pol = data_pol['images'][:, :, indices]
                    h, w, t = img_pol.shape
                    if min_h is None:
                        min_h, min_w = h, w
                    else:
                        min_h = min(min_h, h)
                        min_w = min(min_w, w)

                # Second pass: load both polarisations cropped to that extent.
                angles = None
                for pol, data_pol in zip(polarisation, data_list):
                    ts_pol = data_pol['timestamps'][:]
                    indices = [np.where(ts_pol == ts)[0][0] for ts in common_ts]

                    img_pol = data_pol['images'][:min_h, :min_w, indices]
                    mask_pol = data_pol['masks'][:min_h, :min_w, indices]

                    images_list.append(img_pol)
                    masks_list.append(mask_pol)

                    # Incidence angles are taken from the first polarisation.
                    if pol == polarisation[0]:
                        angles = data_pol['angles_incidence'][:][indices]

                # Stack polarisations on a new trailing axis: (H, W, T, C).
                images = np.stack(images_list, axis=-1)

                # Keep the most pessimistic (highest) mask value of the two.
                masks = np.maximum(masks_list[0], masks_list[1])

                timestamps = common_ts

                metadata = {
                    'polarisation': polarisation,
                    'dual_pol': True
                }

            else:
                # Single polarisation.
                path = f'data/{group_name}/{orbit}/{polarisation}'
                if path not in f:
                    raise ValueError(f"Path {path} not found in dataset")

                pol_data = f[path]

                images = pol_data['images'][:]
                masks = pol_data['masks'][:]
                timestamps = pol_data['timestamps'][:]
                angles = pol_data['angles_incidence'][:]

                # Optional date filter (same byte-string comparison as above).
                if start_date or end_date:
                    mask_ts = np.ones(len(timestamps), dtype=bool)
                    if start_date:
                        mask_ts &= timestamps >= start_date.encode('utf-8')
                    if end_date:
                        mask_ts &= timestamps <= end_date.encode('utf-8')

                    if not np.any(mask_ts):
                        raise ValueError("No data in specified date range")

                    images = images[:, :, mask_ts]
                    masks = masks[:, :, mask_ts]
                    timestamps = timestamps[mask_ts]
                    angles = angles[mask_ts]

                metadata = {
                    'mean': pol_data.attrs['stat_mean'],
                    'std': pol_data.attrs['stat_std'],
                    'min': pol_data.attrs['stat_min'],
                    'max': pol_data.attrs['stat_max'],
                    'n_samples': pol_data.attrs['n_timestamps'],
                    'polarisation': polarisation,
                    'dual_pol': False
                }

            # Replace nodata with NaN so it is ignored by downstream checks.
            if remove_nodata:
                images = np.where(images == self.nodata, np.nan, images)

            # Optional radiometric scaling of the intensity values.
            if scale_type == 'amplitude':
                # amplitude = sqrt(intensity); negative intensities become NaN
                images = np.where(images >= 0, np.sqrt(images), np.nan).astype(np.float32)
            elif scale_type == 'log10':
                # log scale; non-positive intensities become NaN
                images = np.where(images > 0, np.log10(images), np.nan)

            # Normalization with pre-computed statistics (single-pol only:
            # dual-pol metadata carries no mean/std).
            if normalize and not isinstance(polarisation, list):
                mean = metadata['mean']
                std = metadata['std']
                if std > 0:
                    images = (images - mean) / std

            return {
                'images': images,
                'masks': masks,
                'timestamps': [t.decode('utf-8') for t in timestamps],
                'angles_incidence': angles,
                'metadata': metadata,
                'group': group_name,
                'orbit': orbit
            }
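
    # Example sketch (hedged: group name and dates are placeholders):
    #
    #     data = loader.load_data('ABL001', orbit='ASC',
    #                             polarisation=['HH', 'HV'],
    #                             start_date='20200101', end_date='20211231',
    #                             scale_type='log10')
    #     # data['images'] has shape (H, W, T, 2): HH and HV on the last axis.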

    def get_groups_by_class(self, class_name: str) -> List[str]:
        """Return the list of groups for a given class."""
        if class_name not in self.class_index:
            return []
        return [entry['group'] for entry in self.class_index[class_name]]

    def get_all_groups_with_classes(self) -> Dict[str, str]:
        """Return a {group_name: class_name} dictionary."""
        group_to_class = {}
        for class_name in self.classes:
            for group in self.get_groups_by_class(class_name):
                group_to_class[group] = class_name
        return group_to_class

    def get_statistics_summary(self) -> Dict:
        """Return a summary of dataset statistics."""
        stats = {
            'by_class': {},
            'global': {
                'n_groups': self.n_groups,
                'n_classes': len(self.classes),
            }
        }

        for class_name in self.classes:
            groups = self.get_groups_by_class(class_name)
            stats['by_class'][class_name] = {
                'n_groups': len(groups),
                'groups': groups
            }

        return stats
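

if __name__ == '__main__':
    # Smoke-test sketch (hedged: 'dataset.h5' is a placeholder path; group,
    # orbit and polarisation values assume the HDF5 layout described above).
    loader = MLDatasetLoader('dataset.h5')

    summary = loader.get_statistics_summary()
    print(f"{summary['global']['n_groups']} groups, "
          f"{summary['global']['n_classes']} classes")

    group_to_class = loader.get_all_groups_with_classes()
    first_group = next(iter(group_to_class))
    data = loader.load_data(first_group, orbit='DSC', polarisation='HH',
                            scale_type='log10')
    windows, wmasks, positions = loader.extract_windows(
        data['images'], data['masks'], window_size=64)
    n_windows = 0 if windows is None else len(windows)
    print(f"{first_group}: {n_windows} windows of size 64")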