# Model, preprocessing transform, and helper functions for facial expression recognition.
import torch
import torchvision
import torch.nn as nn
from torchvision import transforms
## Add more imports if required
####################################################################################################################
# Define your model and transform and all necessary helper functions here #
# They will be imported to the exp_recognition.py file #
####################################################################################################################
# Index -> expression-label mapping used to decode the network's argmax output.
classes = dict(enumerate((
    'ANGER', 'DISGUST', 'FEAR', 'HAPPINESS', 'NEUTRAL', 'SADNESS', 'SURPRISE',
)))
# Grayscale expression classifier: three conv/BN/ReLU/pool stages + MLP head.
class ExpressionCNN(nn.Module):
    """CNN for facial-expression recognition on 1x48x48 grayscale inputs.

    Three convolutional stages (each Conv2d -> BatchNorm2d -> ReLU ->
    2x2 MaxPool) feed a three-layer fully connected head that emits raw
    logits over ``num_classes`` expression categories.
    """

    # (in_channels, out_channels) for the three convolutional stages.
    _STAGES = ((1, 32), (32, 64), (64, 128))

    def __init__(self, num_classes=7):
        super(ExpressionCNN, self).__init__()
        # Build the conv stages in a loop. Attribute names (conv1, bn1,
        # relu1, pool1, ...) are kept identical to the original layout so
        # previously saved state_dicts still load without remapping.
        for i, (c_in, c_out) in enumerate(self._STAGES, start=1):
            setattr(self, f'conv{i}', nn.Conv2d(c_in, c_out, kernel_size=3, padding=1))
            setattr(self, f'bn{i}', nn.BatchNorm2d(c_out))
            setattr(self, f'relu{i}', nn.ReLU())
            setattr(self, f'pool{i}', nn.MaxPool2d(kernel_size=2, stride=2))
        # Classifier head: a 48x48 input is halved by each of the three
        # pools, leaving a 6x6 map with 128 channels.
        self.fc1 = nn.Linear(128 * 6 * 6, 512)
        self.bn4 = nn.BatchNorm1d(512)
        self.relu4 = nn.ReLU()
        self.fc2 = nn.Linear(512, 256)
        self.bn5 = nn.BatchNorm1d(256)
        self.relu5 = nn.ReLU()
        self.fc3 = nn.Linear(256, num_classes)

    def forward(self, x):
        """Map a (N, 1, 48, 48) batch to (N, num_classes) logits."""
        # Convolutional feature extractor.
        for i in range(1, len(self._STAGES) + 1):
            x = getattr(self, f'conv{i}')(x)
            x = getattr(self, f'bn{i}')(x)
            x = getattr(self, f'relu{i}')(x)
            x = getattr(self, f'pool{i}')(x)
        # Flatten spatial dims and run the fully connected head.
        x = torch.flatten(x, start_dim=1)
        x = self.relu4(self.bn4(self.fc1(x)))
        x = self.relu5(self.bn5(self.fc2(x)))
        return self.fc3(x)
# Backward-compatibility alias for older import sites.
facExpRec = ExpressionCNN
# Helper: collapse a (presumably PIL) image to a single luminance channel.
def rgb2gray(image):
    """Return *image* converted to single-channel ('L') grayscale."""
    grayscale = image.convert('L')
    return grayscale
# Preprocessing pipeline applied before inference:
# grayscale -> 48x48 -> float tensor -> normalized to [-1, 1].
trnscm = transforms.Compose([
    rgb2gray,                                    # collapse RGB to one channel
    transforms.Resize((48, 48)),                 # match the network's input size
    transforms.ToTensor(),                       # PIL image -> tensor in [0, 1]
    transforms.Normalize(mean=[0.5], std=[0.5]), # rescale to [-1, 1]
])