# Scraped from a Hugging Face Space; the Space page reported status: "Runtime error".
| import torch | |
| import torchvision | |
| import torch.nn as nn | |
| from torchvision import transforms | |
| from transformers.utils import logging | |
# Configure the transformers library's logging: emit messages at INFO
# verbosity and grab the shared "transformers" logger for this module.
logging.set_verbosity_info()
logger = logging.get_logger("transformers")
| ## Add more imports if required | |
| #################################################################################################################### | |
| # Define your model and transform and all necessary helper functions here # | |
| # They will be imported to the exp_recognition.py file # | |
| #################################################################################################################### | |
# Index -> expression-label mapping used to decode the network's predictions.
# Order matters: position i in the tuple is class index i.
_EXPRESSION_LABELS = ('ANGER', 'DISGUST', 'FEAR', 'HAPPINESS', 'NEUTRAL', 'SADNESS', 'SURPRISE')
classes = dict(enumerate(_EXPRESSION_LABELS))
| # Example Network | |
# Example Network
class facExpRec(torch.nn.Module):
    """CNN for facial-expression recognition.

    Expects input of shape (batch, 1, 48, 48) — single-channel 48x48
    face crops (see the 48x48 Resize + Grayscale transform in this file) —
    and returns raw class logits of shape (batch, out_features).

    Args:
        out_features: number of expression classes (default 7, matching
            the ``classes`` dictionary above).
    """

    def __init__(self, out_features=7):
        super().__init__()
        self.conv1 = self.convlayer(in_channels=1, out_channels=64, kernel_size=3, max_pool=2)
        self.conv2 = self.convlayer(in_channels=64, out_channels=128, kernel_size=3, max_pool=2)
        self.conv3 = self.convlayer(in_channels=128, out_channels=512, kernel_size=3, max_pool=2)
        self.conv4 = self.convlayer(in_channels=512, out_channels=512, kernel_size=3, max_pool=1)
        # For 48x48 input the conv stack yields a 512 x 2 x 2 map = 2048 features
        # (48->46/23 -> 21/10 -> 8/4 -> 2/2), matching the view() in forward().
        self.fc1 = self.fclayer(2048, 512)
        # BUG FIX: this layer previously hard-coded 7 outputs, silently
        # ignoring the out_features constructor parameter.
        self.fc2 = nn.Linear(512, out_features)

    def convlayer(self, in_channels, out_channels, kernel_size, max_pool=2):
        """Conv -> BatchNorm -> ReLU -> MaxPool building block.

        max_pool=1 leaves the spatial size unchanged (identity pool).
        """
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            # nn.Dropout2d(),
            nn.MaxPool2d(kernel_size=max_pool),
        )

    def fclayer(self, in_features, out_features):
        """Linear -> BatchNorm -> ReLU fully-connected building block."""
        return nn.Sequential(
            nn.Linear(in_features, out_features),
            nn.BatchNorm1d(out_features),
            # nn.Dropout1d(0.4),
            nn.ReLU(),
        )

    def forward(self, x):
        """Compute logits for a batch of (1, 48, 48) images."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        # Flatten the 512x2x2 feature map for the fully-connected head.
        x = x.view(-1, 2048)
        x = self.fc1(x)
        x = self.fc2(x)
        return x
| # Sample Helper function | |
def rgb2gray(image):
    """Convert a PIL image to single-channel ('L' mode) grayscale."""
    grayscale = image.convert('L')
    return grayscale
# Sample Transformation function
# YOUR CODE HERE for changing the Transformation values.
# Preprocessing pipeline matching the model's expected input:
# resize to 48x48 -> grayscale -> tensor -> normalize to roughly [-1, 1].
trnscm = transforms.Compose([
    transforms.Resize((48, 48)),
    transforms.Grayscale(),
    transforms.ToTensor(),
    transforms.Normalize((0.5), (0.5)),
])