import torch
import torch.nn as nn
import torch.nn.functional as F
import timm

# --- PART 1: The Stem Class ---
class InceptionStem(nn.Module):
    # Inception-style stem: parallel 1x1, 3x3 and pooling branches are run on the
    # input, concatenated, then projected back down to out_channels. Every branch
    # uses stride 1 with "same" padding, so the spatial resolution is preserved.
    def __init__(self, in_channels=3, out_channels=64):
        super().__init__()
        # 1x1 convolution branch
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels//2, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True)
        )
        # 3x3 convolution branch
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels//2, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True)
        )
        # Max-pool branch followed by a 1x1 projection
        self.pool_branch = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(in_channels, out_channels//2, kernel_size=1),
            nn.ReLU(inplace=True)
        )
        # Each of the three branches emits out_channels//2 maps (3 * 32 = 96 with the
        # defaults), so the 1x1 projection takes 3 * (out_channels//2) input channels.
        self.project = nn.Conv2d(3 * (out_channels//2), out_channels, kernel_size=1)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        b1 = self.branch1(x)
        b3 = self.branch3(x)
        p = self.pool_branch(x)
        cat = torch.cat([b1, b3, p], dim=1)
        out = F.relu(self.bn(self.project(cat)))
        return out
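
# Quick shape check (a sketch, not executed at import time): with the defaults the
# stem maps (B, 3, H, W) -> (B, 64, H, W), e.g.
#   stem = InceptionStem(in_channels=3, out_channels=64)
#   stem(torch.randn(2, 3, 224, 224)).shape  # -> torch.Size([2, 64, 224, 224])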

# --- PART 2: The Main Model Class ---
class InceptionViT(nn.Module):
    def __init__(self, vit_model_name='vit_base_patch16_224', pretrained=False, stem_out_channels=64, num_classes=2, dropout=0.3):
        # NOTE: pretrained defaults to False because YOUR trained weights are loaded
        # afterwards, so there is no need to download the ImageNet weights again.
        super().__init__()
        self.stem = InceptionStem(in_channels=3, out_channels=stem_out_channels)

        # ViT backbone without its classification head (num_classes=0) and without
        # global pooling, so the raw token sequence is available in forward().
        self.vit = timm.create_model(vit_model_name, pretrained=pretrained, num_classes=0, global_pool='')
        self.embed_dim = self.vit.num_features
        # Pool the stem feature map to one vector and project it to the ViT embedding size.
        self.stem_pool = nn.AdaptiveAvgPool2d(1)
        self.stem_proj = nn.Linear(stem_out_channels, self.embed_dim)

        self.classifier = nn.Sequential(
            nn.LayerNorm(self.embed_dim),
            nn.Dropout(dropout),
            nn.Linear(self.embed_dim, num_classes)
        )

    def forward(self, x):
        # Stem path: Inception features, globally pooled and projected to the ViT embedding size.
        stem_feat_map = self.stem(x)
        stem_vec = self.stem_pool(stem_feat_map).view(x.size(0), -1)
        stem_emb = self.stem_proj(stem_vec)

        # ViT path: patch embedding, CLS token, positional embedding, transformer blocks.
        B = x.size(0)
        x_vit = self.vit.patch_embed(x)
        cls_tokens = self.vit.cls_token.expand(B, -1, -1)
        x_vit = torch.cat((cls_tokens, x_vit), dim=1)
        x_vit = x_vit + self.vit.pos_embed[:, : x_vit.size(1), :].to(x.device)
        x_vit = self.vit.pos_drop(x_vit)

        for blk in self.vit.blocks:
            x_vit = blk(x_vit)
        x_vit = self.vit.norm(x_vit)

        # Fuse the stem embedding into the CLS token out of place (an in-place write
        # into x_vit here could break autograd during training).
        cls_emb = x_vit[:, 0, :] + stem_emb
        out = self.classifier(cls_emb)
        return out
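
# Shape sketch for InceptionViT.forward (assuming 224x224 inputs, the size that
# vit_base_patch16_224's positional embedding is built for):
#   x: (B, 3, 224, 224) -> stem_feat_map: (B, 64, 224, 224) -> stem_emb: (B, 768)
#   patch tokens + CLS: (B, 197, 768) -> cls_emb: (B, 768) -> logits: (B, num_classes)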

# --- PART 3: The Loader Function ---
def load_model(model_path='InceptionViT_best_model.pth'):
    print(f"Loading InceptionViT from {model_path}...")

    # 1. Initialize the empty model structure.
    # CRITICAL: the constructor arguments must match the ones used during training!
    # From your training code: num_classes=?, dropout=0.4.
    # Since the exact num_classes is unknown, it is read from the saved weights instead.

    # Load the weights first so their dimensions can be inspected.
    state_dict = torch.load(model_path, map_location='cpu')

    # Auto-detect the number of classes from the final Linear layer's weight.
    # With the Sequential classifier above, its key is "classifier.2.weight".
    if "classifier.2.weight" in state_dict:
        num_classes = state_dict["classifier.2.weight"].shape[0]
        print(f"Auto-detected {num_classes} classes.")
    else:
        num_classes = 2  # Default fallback
        print("Could not auto-detect classes, defaulting to 2.")

    # 2. Create the model with the detected number of classes.
    model = InceptionViT(num_classes=num_classes, dropout=0.4)

    # 3. Load the weights and switch to evaluation mode.
    model.load_state_dict(state_dict)
    model.eval()

    return model
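
if __name__ == "__main__":
    # Minimal smoke test (a sketch): assumes the default checkpoint path above exists
    # and that inputs are 224x224 RGB, which vit_base_patch16_224 expects.
    model = load_model()
    dummy = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(dummy)
    print("Logits shape:", logits.shape)  # (1, num_classes)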