import torch
import torch.nn as nn


class ConvBlock(nn.Module):
    """Two stacked 3x3 conv -> batch-norm -> ReLU layers.

    Spatial size is preserved (stride 1, padding 1); only the channel
    count changes, from ``in_channels`` to ``out_channels``.
    """

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        # First convolutional layer: bias is omitted because the
        # following BatchNorm has its own learnable shift.
        self.conv_1 = nn.Conv2d(
            in_channels, out_channels,
            kernel_size=3, stride=1, padding=1, bias=False,
        )
        self.batch_norm_1 = nn.BatchNorm2d(num_features=out_channels)
        # Second convolutional layer keeps the channel count fixed.
        self.conv_2 = nn.Conv2d(
            out_channels, out_channels,
            kernel_size=3, stride=1, padding=1, bias=False,
        )
        self.batch_norm_2 = nn.BatchNorm2d(num_features=out_channels)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv -> bn -> relu twice; returns same H/W, new channels."""
        output = self.activation(self.batch_norm_1(self.conv_1(x)))
        output = self.activation(self.batch_norm_2(self.conv_2(output)))
        return output


class PlantCNN(nn.Module):
    """Simple VGG-style classifier built from ``ConvBlock`` stages.

    Args:
        num_classes: Size of the final logit vector.
        channels: Per-stage channel widths; each stage halves the
            spatial resolution with a 2x2 max-pool.
        dropout: Dropout probability applied before the final linear layer.
    """

    def __init__(self, num_classes: int, channels: list, dropout: float) -> None:
        super().__init__()
        # Entry block maps the RGB input (3 channels) to channels[0].
        first_c = channels[0]
        self.input_block = nn.Sequential(
            nn.Conv2d(
                in_channels=3, out_channels=first_c,
                kernel_size=3, stride=1, padding=1, bias=False,
            ),
            nn.BatchNorm2d(num_features=first_c),
            nn.ReLU(inplace=True),
        )

        # One stage per requested width: two ConvBlocks then a 2x downsample.
        self.stages = nn.ModuleList()
        in_c = first_c
        for c in channels:
            stage = nn.Sequential(
                ConvBlock(in_c, c),
                ConvBlock(c, c),
                nn.MaxPool2d(kernel_size=2),
            )
            self.stages.append(stage)
            in_c = c

        # Final pooling + classifier head.
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(channels[-1], num_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return class logits of shape ``(batch, num_classes)``."""
        output = self.input_block(x)
        # Pass through each downsampling stage in order.
        for stage in self.stages:
            output = stage(output)
        # Global-average-pool to (batch, channels[-1], 1, 1), then flatten
        # to (batch, channels[-1]) for the linear classifier.
        output = torch.flatten(self.pool(output), 1)
        output = self.fc(self.dropout(output))
        return output