Commit 87fac06
1 Parent(s): be07a6d
exp rec code
app/Hackathon_setup/exp_recognition.py CHANGED

@@ -67,5 +67,13 @@ def get_expression(img):
     face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
 
     # YOUR CODE HERE, return expression using your model
+    # YOUR CODE HERE, return expression using your model
+    face_det_net = ExpressionRecognitionCNN()
+    model = torch.load(current_path + '/expression_model.t7', map_location=device)
+    face_det_net.load_state_dict(model['net_dict'])
+    face = trnscm(face).unsqueeze(0)
+    output = face_det_net(face)
+    _, pred = torch.max(output, dim=1)
+    return classes[pred]
 
-    return "YET TO BE CODED"
+    return "YET TO BE CODED"
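As a sanity check on the added inference path, the sketch below restates it as a self-contained helper. The function name `predict_expression` and its explicit parameters are illustrative, not part of the commit; `current_path`, `device`, `classes`, and `trnscm` are assumed to be defined elsewhere in the setup files, and loading `model['net_dict']` implies the checkpoint was saved as something like torch.save({'net_dict': net.state_dict()}, path), which is an assumption here.

# Illustrative sketch only, not part of the commit: the inference path from
# get_expression written as a standalone helper with explicit inputs.
import cv2
import torch
from PIL import Image

def predict_expression(img, net, transform, class_names):
    # img: BGR image (e.g. from cv2.imread); net: a loaded ExpressionRecognitionCNN;
    # transform: the trnscm pipeline; class_names: index -> expression label.
    face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))  # 2-D array -> mode 'L' PIL image
    x = transform(face).unsqueeze(0)    # (1, 1, 100, 100) after Grayscale/Resize/ToTensor/Normalize
    net.eval()                          # inference mode
    with torch.no_grad():               # no gradients needed for a single prediction
        output = net(x)
    _, pred = torch.max(output, dim=1)  # index of the highest-scoring class
    return class_names[pred.item()]     # .item() converts the 1-element tensor to a plain int

The sketch converts `pred` to a plain int before indexing because whether `classes[pred]` works with a tensor key depends on how `classes` is defined, which the diff does not show.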
app/Hackathon_setup/exp_recognition_model.py CHANGED

@@ -21,6 +21,23 @@ class facExpRec(torch.nn.Module):
     def forward(self, x):
         pass # remove 'pass' once you have written your code
         #YOUR CODE HERE
+
+class ExpressionRecognitionCNN(nn.Module):
+    def __init__(self, num_classes = 7):
+        super(ExpressionRecognitionCNN, self).__init__()
+        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
+        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
+        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
+        self.fc1 = nn.Linear(64 * 25 * 25, 128)
+        self.fc2 = nn.Linear(128, num_classes)
+
+    def forward(self, x):
+        x = self.pool(torch.relu(self.conv1(x)))
+        x = self.pool(torch.relu(self.conv2(x)))
+        x = x.view(-1, 64 * 25 * 25)
+        x = torch.relu(self.fc1(x))
+        x = self.fc2(x)
+        return x
 
 # Sample Helper function
 def rgb2gray(image):
@@ -28,4 +45,7 @@ def rgb2gray(image):
 
 # Sample Transformation function
 #YOUR CODE HERE for changing the Transformation values.
-trnscm = transforms.Compose([
+trnscm = transforms.Compose([transforms.Grayscale(num_output_channels=1),
+                             transforms.Resize((100, 100)),
+                             transforms.ToTensor(),
+                             transforms.Normalize((0.5,), (0.5,)),])