deepguess committed on
Commit
6202bfd
·
verified ·
1 Parent(s): 8a70fef

Upload model_resnet3d.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. model_resnet3d.py +189 -0
model_resnet3d.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ 3D ResNet for tornado detection + prediction.
3
+
4
+ Configs:
5
+ ResNet3D-18: BasicBlock, [2,2,2,2] (~11M params)
6
+ ResNet3D-34: BasicBlock, [3,4,6,3] (~21M params)
7
+ ResNet3D-50: Bottleneck, [3,4,6,3] (~40M params)
8
+
9
+ Input: (B, 24, 8, 128, 128) — 24 dual-pol channels, 8 time frames, 128x128 grid
10
+ Output: (B, 4) — [det_neg, det_pos, pred_neg, pred_pos]
11
+ """
12
+ import torch
13
+ import torch.nn as nn
14
+
15
+
16
class BasicBlock3D(nn.Module):
    """Two-conv 3D residual block (expansion 1), as used in ResNet-18/34.

    The first 3x3x3 conv may stride; the second always keeps resolution.
    An optional ``downsample`` module projects the identity so it matches
    the main path's shape before the residual add.
    """

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv3d(
            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm3d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv3d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm3d(planes)
        self.downsample = downsample

    def forward(self, x):
        # Project the shortcut only when a downsample module was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + shortcut
        return self.relu(y)
38
+
39
+
40
class Bottleneck3D(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck 3D residual block (expansion 4), ResNet-50 style.

    Channels are reduced by the first 1x1 conv, the (possibly strided) 3x3x3
    conv operates at the reduced width, and the final 1x1 conv expands to
    ``planes * expansion``. ``downsample`` projects the identity when needed.
    """

    expansion = 4

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super().__init__()
        out_planes = planes * self.expansion
        self.conv1 = nn.Conv3d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        self.conv2 = nn.Conv3d(
            planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = nn.Conv3d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d(out_planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return self.relu(y + shortcut)
64
+
65
+
66
class ResNet3D(nn.Module):
    """3D ResNet backbone. Returns feature vector of size 512 * block.expansion.

    Args:
        block: residual block class (e.g. BasicBlock3D or Bottleneck3D); must
            expose ``expansion`` and accept ``(in_planes, planes, stride, downsample)``.
        layers: number of blocks per stage, length 4 (e.g. [3, 4, 6, 3]).
        in_channels: input channel count (default 24 dual-pol channels).
    """

    def __init__(self, block, layers, in_channels=24):
        super().__init__()
        self.in_planes = 64

        # Initial conv: halve H and W but keep the (short) time axis intact.
        self.conv1 = nn.Conv3d(in_channels, 64, kernel_size=(3, 7, 7),
                               stride=(1, 2, 2), padding=(1, 3, 3), bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))

        # Residual stages. NOTE(review): the int stride 2 on layer2 makes
        # Conv3d stride ALL three dims (time included), so layers 2-4 each
        # halve the temporal axis as well as the spatial axes. If layer2 was
        # intended to be spatial-only, it should be stride=(1, 2, 2) —
        # confirm against how the published weights were trained.
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)          # temporal /2, spatial /2
        self.layer3 = self._make_layer(block, 256, layers[2], stride=(2, 2, 2))  # temporal /2, spatial /2
        self.layer4 = self._make_layer(block, 512, layers[3], stride=(2, 2, 2))  # temporal /2, spatial /2

        # Global average pool collapses (T, H, W) into one vector per sample.
        self.avgpool = nn.AdaptiveAvgPool3d(1)
        self.feat_dim = 512 * block.expansion

        # He init for convs; BatchNorm starts as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, num_blocks, stride=1):
        """Build one residual stage.

        The first block carries the stride and, when the shape changes, a
        1x1 conv + BN projection so the identity matches the main path.
        ``stride`` may be an int or a (t, h, w) tuple — Conv3d accepts both,
        so it is passed straight through (the previous int/tuple branch was
        dead code: both arms assigned the same value).
        """
        downsample = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv3d(self.in_planes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm3d(planes * block.expansion),
            )

        layers = [block(self.in_planes, planes, stride, downsample)]
        self.in_planes = planes * block.expansion
        for _ in range(1, num_blocks):
            layers.append(block(self.in_planes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        # x: (B, C, T, H, W) = (B, 24, 8, 128, 128)
        x = self.relu(self.bn1(self.conv1(x)))  # (B, 64, 8, 64, 64)
        x = self.maxpool(x)                     # (B, 64, 8, 32, 32)
        x = self.layer1(x)                      # (B, 64, 8, 32, 32)
        x = self.layer2(x)                      # (B, 128, 4, 16, 16) — int stride 2 halves time too
        x = self.layer3(x)                      # (B, 256, 2, 8, 8)
        x = self.layer4(x)                      # (B, 512, 1, 4, 4)
        x = self.avgpool(x)                     # (B, 512, 1, 1, 1)
        return x.flatten(1)                     # (B, 512 * block.expansion)
126
+
127
+
128
class DualHeadResNet3D(nn.Module):
    """Dual-head wrapper: detection + prediction heads on shared ResNet3D backbone."""

    def __init__(self, block, layers, in_channels=24, drop_rate=0.3):
        super().__init__()
        self.backbone = ResNet3D(block, layers, in_channels)
        feat_dim = self.backbone.feat_dim

        self.dropout = nn.Dropout(drop_rate)
        self.detect_head = nn.Linear(feat_dim, 2)
        self.predict_head = nn.Linear(feat_dim, 2)

        # Small-variance weights and zero bias keep early logits near zero.
        for head in (self.detect_head, self.predict_head):
            nn.init.normal_(head.weight, std=0.01)
            nn.init.zeros_(head.bias)

    def forward(self, x):
        # x: (B, C, T, H, W)
        feats = self.backbone(x)  # (B, feat_dim)
        # Cast to FP32 before the heads to prevent Inf grads under AMP.
        feats = self.dropout(feats.float())
        det_logits = self.detect_head(feats)    # (B, 2)
        pred_logits = self.predict_head(feats)  # (B, 2)
        # Layout: [det_neg, det_pos, pred_neg, pred_pos]
        return torch.cat((det_logits, pred_logits), dim=1)  # (B, 4)
154
+
155
+
156
# --- Factory functions ---

# Named configurations: each maps to the residual block class and the number
# of blocks per stage. "resnet18"/"resnet34" use BasicBlock3D (expansion 1);
# "resnet50" uses Bottleneck3D (expansion 4, so its feature dim is 2048).
CONFIGS = {
    "resnet18": {"block": BasicBlock3D, "layers": [2, 2, 2, 2]},
    "resnet34": {"block": BasicBlock3D, "layers": [3, 4, 6, 3]},
    "resnet50": {"block": Bottleneck3D, "layers": [3, 4, 6, 3]},
}
163
+
164
+
165
def build_resnet3d(config="resnet34", in_channels=24, drop_rate=0.3):
    """Build a DualHeadResNet3D from config name.

    Args:
        config: one of the keys of ``CONFIGS`` ("resnet18", "resnet34", "resnet50").
        in_channels: input channel count forwarded to the backbone.
        drop_rate: dropout probability applied before the two heads.

    Returns:
        A DualHeadResNet3D instance.

    Raises:
        KeyError: if ``config`` is not a known configuration name.
    """
    try:
        cfg = CONFIGS[config]
    except KeyError:
        # Same exception type as before, but the message now lists the valid
        # choices instead of just echoing the bad key.
        raise KeyError(
            f"Unknown config {config!r}; expected one of {sorted(CONFIGS)}"
        ) from None
    return DualHeadResNet3D(cfg["block"], cfg["layers"], in_channels, drop_rate)
169
+
170
+
171
+ if __name__ == "__main__":
172
+ print("=== ResNet3D Model Configs ===\n")
173
+
174
+ for name in ["resnet18", "resnet34", "resnet50"]:
175
+ model = build_resnet3d(name)
176
+ n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
177
+ n_backbone = sum(p.numel() for p in model.backbone.parameters() if p.requires_grad)
178
+ print(f"{name}: {n_params:>12,} total params ({n_backbone:,} backbone)")
179
+
180
+ # Forward pass test
181
+ print("\nForward pass test (resnet34)...")
182
+ model = build_resnet3d("resnet34")
183
+ x = torch.randn(2, 24, 8, 128, 128)
184
+ with torch.no_grad():
185
+ out = model(x)
186
+ print(f" Input: {tuple(x.shape)}")
187
+ print(f" Output: {tuple(out.shape)} (expected (2, 4))")
188
+ assert out.shape == (2, 4), f"Expected (2, 4), got {out.shape}"
189
+ print(" PASSED")