anfera236 committed on
Commit
ca4c713
·
verified ·
1 Parent(s): f5ad373

Upload 4 files

Browse files
images/AllPercentiles.png ADDED

Git LFS Details

  • SHA256: d26a78f53d51b22ca9f736d1c012b5428e3fceb18f5a375e3f994ca985befafb
  • Pointer size: 131 Bytes
  • Size of remote file: 891 kB
images/Explanation.png ADDED

Git LFS Details

  • SHA256: d541fde453c094825a2d72ee8c3cfbd7b776c103e64ec4530a97974d41a039ba
  • Pointer size: 132 Bytes
  • Size of remote file: 1.84 MB
scripts/canopy_plots.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Canopy height model helpers.
3
+
4
+ These utilities create canopy height models (CHM) from voxelized point cloud
5
+ data cubes while applying a simple adaptive filter to smooth noisy ground
6
+ estimates (DTM). Functions are written with clarity and deterministic output in
7
+ mind for use in notebooks and scripts.
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ from typing import Tuple
13
+
14
+ import numpy as np
15
+ from numpy.typing import NDArray
16
+
17
+ Array2D = NDArray[np.floating]
18
+ Array3D = NDArray[np.floating]
19
+
20
+
21
def apply_kernel(dtm: Array2D, center: Tuple[int, int], kernel_size: int) -> Array2D:
    """
    Extract the neighborhood of `dtm` around `center`, clipped at the edges.

    Parameters
    ----------
    dtm:
        Two-dimensional digital terrain model array.
    center:
        (row, column) index around which to extract the window.
    kernel_size:
        Size of the square window (must be positive).

    Returns
    -------
    A view into `dtm`; near the borders the window is smaller than requested.
    """
    if kernel_size <= 0:
        raise ValueError("kernel_size must be a positive integer.")

    radius = kernel_size // 2
    row, col = center

    # Clamp each edge independently so border pixels get a truncated window.
    top = max(row - radius, 0)
    bottom = min(row + radius + 1, dtm.shape[0])
    left = max(col - radius, 0)
    right = min(col + radius + 1, dtm.shape[1])

    return dtm[top:bottom, left:right]
44
+
45
+
46
def adaptive_dtm_filter(
    dtm: Array2D, kernel_size: int = 7, percentile: float = 50.0
) -> Array2D:
    """
    Suppress upward spikes in a DTM with a local lower-percentile filter.

    Any pixel that exceeds the given percentile of its neighborhood is
    replaced by the mean of the neighborhood values at or below that
    percentile, which keeps ground spikes from propagating into the CHM.
    The neighborhood is read from the array being filtered, so replacements
    made earlier in the scan influence later pixels (progressive smoothing).
    The input array is never modified; a filtered copy is returned.
    """
    if kernel_size <= 0:
        raise ValueError("kernel_size must be a positive integer.")

    smoothed = dtm.copy()
    n_rows, n_cols = smoothed.shape
    radius = kernel_size // 2

    for row, col in np.ndindex(n_rows, n_cols):
        # Edge-clipped square neighborhood around the current pixel.
        neighborhood = smoothed[
            max(row - radius, 0): min(row + radius + 1, n_rows),
            max(col - radius, 0): min(col + radius + 1, n_cols),
        ].ravel()
        if neighborhood.size == 0:
            continue

        cutoff = float(np.percentile(neighborhood, percentile))
        if smoothed[row, col] <= cutoff:
            continue  # pixel is not a local spike

        below_cutoff = neighborhood[neighborhood <= cutoff]
        if below_cutoff.size:
            # NOTE(review): assigning a float mean into an integer-dtype DTM
            # truncates toward zero — confirm callers pass float arrays.
            smoothed[row, col] = float(below_cutoff.mean())

    return smoothed
72
+
73
+
74
def hillshade(
    array: Array2D, azimuth: float = 90.0, angle_altitude: float = 60.0
) -> Array2D:
    """
    Render a hillshaded visualization of a 2D elevation array.

    `azimuth` (light direction) and `angle_altitude` (light elevation) are
    in degrees, matching common GIS conventions. Returns values scaled to
    the 0-255 range.
    """
    # Mirror the azimuth so increasing degrees rotate the light clockwise.
    light_azimuth_rad = (360.0 - azimuth) * np.pi / 180.0
    light_altitude_rad = angle_altitude * np.pi / 180.0

    grad_rows, grad_cols = np.gradient(array)
    slope = np.pi / 2.0 - np.arctan(np.sqrt(grad_rows * grad_rows + grad_cols * grad_cols))
    aspect = np.arctan2(-grad_rows, grad_cols)

    # Lambertian-style illumination in [-1, 1].
    illumination = np.sin(light_altitude_rad) * np.sin(slope) + np.cos(
        light_altitude_rad
    ) * np.cos(slope) * np.cos((light_azimuth_rad - np.pi / 2.0) - aspect)

    # Rescale to grayscale range.
    return 255.0 * (illumination + 1.0) / 2.0
96
+
97
+
98
def calc_height_surface(cube: Array3D, percentile: float) -> NDArray[np.int64]:
    """
    Return, per (row, col) column, the first height-bin index at which the
    normalized cumulative density exceeds `percentile` (a 0-1 fraction).

    Columns with zero total density never cross the threshold and map to 0.
    """
    density = np.cumsum(cube, axis=0)
    column_totals = density.max(axis=0)
    column_totals[column_totals == 0] = 1  # keep the division finite for empty columns
    fraction = density / column_totals

    # argmax over booleans picks the first True along the height axis.
    exceeds = fraction > percentile
    return exceeds.argmax(axis=0).astype(np.int64)
109
+
110
+
111
def create_chm(
    cube: Array3D, canopy_percentile: float = 0.98, dtm_percentile: float = 0.05, kernel_size: int = 7
) -> tuple[Array2D, Array2D, Array2D, NDArray[np.int64]]:
    """
    Derive canopy height model, terrain model, hillshade, and DEM from a cube.

    The ground surface is taken at a low cumulative-density percentile and
    spike-filtered; the canopy surface at a high percentile. The CHM is
    their difference.

    Returns
    -------
    chm:
        Canopy height model (DEM minus DTM).
    dtm:
        Smoothed digital terrain model.
    dtm_hillshade:
        Hillshaded representation of the DTM for visualization.
    dem:
        Digital elevation model derived from the cube percentile.
    """
    # NOTE(review): calc_height_surface returns int64, so the float means
    # written by adaptive_dtm_filter are truncated — confirm this is intended.
    ground = adaptive_dtm_filter(
        calc_height_surface(cube, dtm_percentile), kernel_size=kernel_size
    )
    canopy_top = calc_height_surface(cube, canopy_percentile)

    return canopy_top - ground, ground, hillshade(ground), canopy_top
134
+
135
+
136
# Backwards-compatible aliases for existing notebooks/scripts.
# (camelCase names predate this module; prefer the snake_case originals.)
calcSurf = calc_height_surface
createCHM = create_chm

# Explicit public API; both naming styles are exported for legacy callers.
__all__ = [
    "adaptive_dtm_filter",
    "apply_kernel",
    "calc_height_surface",
    "calcSurf",
    "create_chm",
    "createCHM",
    "hillshade",
]
scripts/forward_model.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
+ import matplotlib.pyplot as plt
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+ try:
10
+ # When imported as part of the package.
11
+ from .canopy_plots import createCHM
12
+ except ImportError:
13
+ # Fallback for running as a standalone script (python -m hhdc.forward_model).
14
+ from hhdc.canopy_plots import createCHM
15
+
16
+
17
class LidarForwardImagingModel(nn.Module):
    """Forward model of a photon-counting lidar imager.

    Maps a high-resolution photon-density cube onto a coarser sensor grid:
    per-footprint energy normalization, Gaussian footprint blur applied to
    each height bin, area-weighted downsampling, then Poisson shot noise
    plus additive Gaussian readout noise. Output of ``forward`` is
    stochastic (uses ``torch.poisson`` and ``torch.randn_like``).
    """

    def __init__(
        self,
        input_res_m=(2.0, 2.0),
        output_res_m=(3.0, 6.0),
        footprint_diameter_m=10.0,
        b=0.1,
        eta=0.5,
        ref_altitude=500.0,
        ref_photon_count=20.0,
    ):
        """
        Args:
            input_res_m (tuple): Physical size of input pixels (dy, dx) in meters.
            output_res_m (tuple): Physical size of output pixels (dy, dx) in meters.
            footprint_diameter_m (float): The 1/e^2 beam diameter in meters.
            b (float): Background noise.
            eta (float): Readout noise.
            ref_altitude (float): Reference altitude (km).
            ref_photon_count (float): Target photon count.
        """
        super().__init__()
        self.b = b
        self.eta = eta
        # NOTE(review): only the ratio ref_altitude / altitude is used in
        # forward(), so the unit just has to match the caller's `altitude`.
        self.ref_altitude = ref_altitude
        self.ref_photon_count = ref_photon_count

        self.input_res_m = input_res_m
        self.output_res_m = output_res_m

        # 1. Calculate Area Scale Factor
        # Ratio of output to input pixel area; converts mean intensity on the
        # coarse grid back into integrated counts per output pixel.
        in_area = input_res_m[0] * input_res_m[1]
        out_area = output_res_m[0] * output_res_m[1]
        self.area_scale_factor = out_area / in_area

        # 2. Calculate Sigma in Input Pixels
        # Def: 1/e^2 diameter is 4 * sigma
        sigma_m = footprint_diameter_m / 4.0

        # Anisotropic input pixels are approximated by a single isotropic sigma.
        avg_input_res = (input_res_m[0] + input_res_m[1]) / 2.0
        sigma_px = sigma_m / avg_input_res

        # 3. Create Base Kernel
        # We use 6*sigma for the kernel size to capture >99% of energy
        # (Since diameter is 4*sigma, this means kernel is 1.5x the footprint size)
        kernel_size = int(math.ceil(6 * sigma_px))
        if kernel_size % 2 == 0:
            # Force an odd size so the kernel has a true center pixel.
            kernel_size += 1

        # Buffer (not a Parameter): moves with .to(device) and is saved in
        # state_dict, but is not trained.
        self.register_buffer("kernel", self._create_gaussian_kernel(kernel_size, sigma_px))

        print(f"Model Initialized: In {input_res_m}m -> Out {output_res_m}m")
        print(f"Footprint (1/e^2): {footprint_diameter_m}m (Sigma: {sigma_m:.2f}m / {sigma_px:.2f} px)")

    def _create_gaussian_kernel(self, size, sigma):
        """Return a (1, 1, size, size) Gaussian kernel normalized to unit sum."""
        coords = torch.arange(size).float() - (size - 1) / 2
        x_grid, y_grid = torch.meshgrid(coords, coords, indexing='ij')
        kernel = torch.exp(-(x_grid**2 + y_grid**2) / (2 * sigma**2))
        # Unit sum so the blur conserves total energy.
        kernel = kernel / kernel.sum()
        return kernel.view(1, 1, size, size)

    def forward(self, X_h, altitude=500.0):
        """Simulate a noisy low-resolution measurement from cube ``X_h``.

        Args:
            X_h: Tensor of shape (num_bins, H, W) or (batch, num_bins, H, W).
            altitude: Platform altitude; intensity scales as
                (ref_altitude / altitude) ** 2.

        Returns:
            Noisy tensor on the coarse output grid; the batch dimension is
            squeezed back out if it was added here. Result is random —
            seed torch for reproducibility.
        """
        if X_h.ndim == 3:
            # Promote to batched form for conv2d/interpolate.
            X_h = X_h.unsqueeze(0)

        batch_size, num_bins, h_in, w_in = X_h.shape

        # --- 1. Dynamic Output Size ---
        # Output grid covers the same physical field of view at coarser pixels.
        fov_h_m = h_in * self.input_res_m[0]
        fov_w_m = w_in * self.input_res_m[1]

        out_h = int(fov_h_m / self.output_res_m[0])
        out_w = int(fov_w_m / self.output_res_m[1])
        output_size = (out_h, out_w)

        # --- 2. Physics Normalization ---
        energy_per_tube = X_h.sum(dim=1, keepdim=True)
        global_mean_energy = energy_per_tube.mean(dim=(2, 3), keepdim=True)
        # Epsilon guards against an all-zero input cube.
        X_norm = X_h / (global_mean_energy + 1e-8)

        dist_scale = (self.ref_altitude / altitude) ** 2
        target_intensity = (self.ref_photon_count / self.area_scale_factor) * dist_scale
        X_scaled = X_norm * target_intensity

        # --- 3. Spatial Blurring ---
        # Expand single-channel kernel to match height bins
        # (depthwise conv: each bin is blurred independently via groups).
        current_kernel = self.kernel.repeat(num_bins, 1, 1, 1)

        # "Same" padding — valid because the kernel size is forced odd.
        padding = current_kernel.shape[-1] // 2
        X_blurred = F.conv2d(X_scaled, current_kernel, padding=padding, groups=num_bins)

        # --- 4. Downsampling ---
        X_binned = F.interpolate(X_blurred, size=output_size, mode='area')
        # 'area' mode averages; rescale means back to integrated counts.
        X_integrated = X_binned * self.area_scale_factor

        # --- 5. Noise ---
        # relu keeps the Poisson rate non-negative; b adds background counts.
        lambda_val = torch.relu(X_integrated) + self.b
        X_l = torch.poisson(lambda_val)
        gaussian_noise = torch.randn_like(X_l) * self.eta
        Y_l = X_l + gaussian_noise

        if Y_l.shape[0] == 1:
            # Restore the unbatched shape for single-cube inputs.
            Y_l = Y_l.squeeze(0)

        return Y_l