File size: 4,339 Bytes
ca4c713
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
import math

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

try:
    # When imported as part of the package.
    from .canopy_plots import createCHM
except ImportError:
    # Fallback for running as a standalone script (python -m hhdc.forward_model).
    from hhdc.canopy_plots import createCHM


class LidarForwardImagingModel(nn.Module):
    """Simulate a spaceborne lidar imager over a height-histogram cube.

    Pipeline: radiometric normalization/scaling with altitude (inverse-square),
    spatial blur by the Gaussian beam footprint, area-downsampling to the
    detector grid, then Poisson shot noise plus additive Gaussian readout noise.
    """

    def __init__(
        self,
        input_res_m=(2.0, 2.0),
        output_res_m=(3.0, 6.0),
        footprint_diameter_m=10.0,
        b=0.1,
        eta=0.5,
        ref_altitude=500.0,
        ref_photon_count=20.0,
    ):
        """
        Args:
            input_res_m (tuple): Physical size of input pixels (dy, dx) in meters.
            output_res_m (tuple): Physical size of output pixels (dy, dx) in meters.
            footprint_diameter_m (float): The 1/e^2 beam diameter in meters.
                Must be positive.
            b (float): Background noise rate added to the Poisson mean.
            eta (float): Std-dev of the additive Gaussian readout noise.
            ref_altitude (float): Reference altitude (km).
            ref_photon_count (float): Target photon count per output pixel at
                the reference altitude.

        Raises:
            ValueError: If footprint_diameter_m is not positive (a zero-width
                beam would produce a divide-by-zero / NaN blur kernel).
        """
        super().__init__()
        self.b = b
        self.eta = eta
        self.ref_altitude = ref_altitude
        self.ref_photon_count = ref_photon_count

        self.input_res_m = input_res_m
        self.output_res_m = output_res_m

        # 1. Area scale factor: how many input-pixel areas fit in one output pixel.
        in_area = input_res_m[0] * input_res_m[1]
        out_area = output_res_m[0] * output_res_m[1]
        self.area_scale_factor = out_area / in_area

        # 2. Sigma in input pixels.
        # Convention used here: the 1/e^2 diameter equals 4 * sigma.
        sigma_m = footprint_diameter_m / 4.0

        avg_input_res = (input_res_m[0] + input_res_m[1]) / 2.0
        sigma_px = sigma_m / avg_input_res
        # BUGFIX: guard the degenerate beam before building the kernel; the
        # original code silently produced a NaN kernel for sigma_px == 0.
        if sigma_px <= 0:
            raise ValueError("footprint_diameter_m must be positive")

        # 3. Base kernel.
        # 6*sigma captures >99% of the beam energy (since the diameter is
        # 4*sigma, the kernel is 1.5x the footprint size).
        kernel_size = int(math.ceil(6 * sigma_px))
        if kernel_size % 2 == 0:
            kernel_size += 1  # force odd size so the kernel has a center pixel

        self.register_buffer("kernel", self._create_gaussian_kernel(kernel_size, sigma_px))

        print(f"Model Initialized: In {input_res_m}m -> Out {output_res_m}m")
        print(f"Footprint (1/e^2): {footprint_diameter_m}m (Sigma: {sigma_m:.2f}m / {sigma_px:.2f} px)")

    def _create_gaussian_kernel(self, size, sigma):
        """Return a unit-sum 2-D Gaussian kernel of shape (1, 1, size, size)."""
        coords = torch.arange(size).float() - (size - 1) / 2
        x_grid, y_grid = torch.meshgrid(coords, coords, indexing='ij')
        kernel = torch.exp(-(x_grid**2 + y_grid**2) / (2 * sigma**2))
        kernel = kernel / kernel.sum()  # normalize so the blur conserves energy
        return kernel.view(1, 1, size, size)

    def forward(self, X_h, altitude=500.0):
        """Apply the forward imaging model.

        Args:
            X_h (Tensor): Input cube, (bins, H, W) or (batch, bins, H, W).
            altitude (float): Platform altitude in the same units as
                ref_altitude.

        Returns:
            Tensor: Noisy low-resolution cube. Unbatched (3-D) input yields
            unbatched output; a caller-supplied batch dimension is preserved,
            even when the batch size is 1.
        """
        # BUGFIX: remember whether *we* added the batch dim. The original code
        # squeezed any size-1 leading dim on the way out, so a caller passing
        # an explicit 4-D batch-of-one silently got a 3-D tensor back.
        added_batch = X_h.ndim == 3
        if added_batch:
            X_h = X_h.unsqueeze(0)

        batch_size, num_bins, h_in, w_in = X_h.shape

        # --- 1. Dynamic Output Size ---
        # Field of view in meters, then truncating division to detector pixels.
        fov_h_m = h_in * self.input_res_m[0]
        fov_w_m = w_in * self.input_res_m[1]

        out_h = int(fov_h_m / self.output_res_m[0])
        out_w = int(fov_w_m / self.output_res_m[1])
        output_size = (out_h, out_w)

        # --- 2. Physics Normalization ---
        # Normalize by the global mean column (tube) energy, then rescale so a
        # pixel at the reference altitude receives ~ref_photon_count photons,
        # with inverse-square falloff for other altitudes.
        energy_per_tube = X_h.sum(dim=1, keepdim=True)
        global_mean_energy = energy_per_tube.mean(dim=(2, 3), keepdim=True)
        X_norm = X_h / (global_mean_energy + 1e-8)

        dist_scale = (self.ref_altitude / altitude) ** 2
        target_intensity = (self.ref_photon_count / self.area_scale_factor) * dist_scale
        X_scaled = X_norm * target_intensity

        # --- 3. Spatial Blurring ---
        # Depthwise (grouped) convolution: the same single-channel Gaussian is
        # applied independently to every height bin.
        current_kernel = self.kernel.repeat(num_bins, 1, 1, 1)

        padding = current_kernel.shape[-1] // 2
        X_blurred = F.conv2d(X_scaled, current_kernel, padding=padding, groups=num_bins)

        # --- 4. Downsampling ---
        # mode='area' averages over each output cell; multiplying by the area
        # ratio converts that mean back into an integrated photon count.
        X_binned = F.interpolate(X_blurred, size=output_size, mode='area')
        X_integrated = X_binned * self.area_scale_factor

        # --- 5. Noise ---
        # Poisson shot noise on a non-negative rate (plus background b),
        # followed by additive Gaussian readout noise.
        lambda_val = torch.relu(X_integrated) + self.b
        X_l = torch.poisson(lambda_val)
        gaussian_noise = torch.randn_like(X_l) * self.eta
        Y_l = X_l + gaussian_noise

        # Strip the batch dim only if we added it ourselves.
        if added_batch:
            Y_l = Y_l.squeeze(0)

        return Y_l