Spaces:
Sleeping
Sleeping
face exp
Browse files
app/Hackathon_setup/exp_model.t7
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:05935a6dbaf7dc4656615384a5f99bf4c665d50195db06725026cc9440589f4d
|
| 3 |
+
size 554893562
|
app/Hackathon_setup/exp_recognition.py
CHANGED
|
@@ -65,7 +65,20 @@ def get_expression(img):
|
|
| 65 |
face = detected_face(img)
|
| 66 |
if face==0:
|
| 67 |
face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
|
| 68 |
-
|
| 69 |
# YOUR CODE HERE, return expression using your model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 70 |
|
| 71 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 65 |
def get_expression(img):
    """Return the predicted facial-expression label for a BGR image.

    Detects a face in ``img``; if detection fails (``detected_face``
    returns 0) the whole frame is converted to grayscale and used
    instead.  The face is preprocessed with ``trnscm``, run through the
    trained ``facExpRec`` network, and the argmax class name is
    returned as a string (one of ANGER/DISGUST/FEAR/HAPPINESS/NEUTRAL/
    SADNESS/SURPRISE).
    """
    face = detected_face(img)
    if face == 0:
        # No face found: fall back to the full frame, converted to grayscale.
        face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    face = trnscm(face).unsqueeze(0)  # add batch dimension -> (1, C, H, W)

    model_path = current_path + "/exp_model.t7"
    # BUG FIX: the original opened the binary checkpoint in text mode just to
    # seek/tell its size and never closed the handle (resource leak).
    # os.path.getsize reports the size without opening the file at all.
    print("Size of file is :", os.path.getsize(model_path), "bytes")

    exp_net = facExpRec()
    checkpoint = torch.load(model_path, map_location="cpu")
    exp_net.load_state_dict(checkpoint["exp_dict"])

    exp_net.eval()
    face = face.to("cpu")
    with torch.no_grad():  # inference only; skip autograd bookkeeping
        outputs = exp_net(face)
    _, predicted = torch.max(outputs.data, 1)
    classes = {0: 'ANGER', 1: 'DISGUST', 2: 'FEAR', 3: 'HAPPINESS', 4: 'NEUTRAL', 5: 'SADNESS', 6: 'SURPRISE'}
    return classes[predicted.item()]
|
app/Hackathon_setup/exp_recognition_model.py
CHANGED
|
@@ -2,6 +2,7 @@ import torch
|
|
| 2 |
import torchvision
|
| 3 |
import torch.nn as nn
|
| 4 |
from torchvision import transforms
|
|
|
|
| 5 |
## Add more imports if required
|
| 6 |
|
| 7 |
####################################################################################################################
|
|
@@ -15,12 +16,25 @@ classes = {0: 'ANGER', 1: 'DISGUST', 2: 'FEAR', 3: 'HAPPINESS', 4: 'NEUTRAL', 5:
|
|
| 15 |
# Example Network
|
| 16 |
class facExpRec(torch.nn.Module):
|
| 17 |
def __init__(self):
|
| 18 |
-
|
| 19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
def forward(self, x):
|
| 22 |
-
|
| 23 |
-
#YOUR CODE HERE
|
| 24 |
|
| 25 |
# Sample Helper function
|
| 26 |
def rgb2gray(image):
|
|
@@ -28,4 +42,8 @@ def rgb2gray(image):
|
|
| 28 |
|
| 29 |
# Sample Transformation function
|
| 30 |
#YOUR CODE HERE for changing the Transformation values.
|
| 31 |
-
trnscm = transforms.Compose([rgb2gray, transforms.Resize((48,48)), transforms.ToTensor()])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
import torchvision
|
| 3 |
import torch.nn as nn
|
| 4 |
from torchvision import transforms
|
| 5 |
+
import torchvision.models as models
|
| 6 |
## Add more imports if required
|
| 7 |
|
| 8 |
####################################################################################################################
|
|
|
|
| 16 |
# Example Network
|
| 17 |
class facExpRec(torch.nn.Module):
    """VGG16-based facial-expression classifier over 7 classes.

    The pretrained convolutional backbone is frozen; only the final
    classifier layer (replaced in ``__init__`` with a small MLP head
    ending in ``LogSoftmax``) is trainable.
    """

    def __init__(self):
        super().__init__()
        self.model = models.vgg16(pretrained=True)
        # Freeze every pretrained parameter ...
        for params in self.model.parameters():
            params.requires_grad = False
        # ... then swap the last classifier layer for a trainable head.
        input_shape = self.model.classifier[6].in_features
        self.model.classifier[6] = nn.Sequential(
            nn.Linear(input_shape, 1024), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(1024, 256), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(256, 7), nn.LogSoftmax(dim=1))
        # BUG FIX: the original did `self.model.classifier[6].requires_grad
        # = True`, which is a plain attribute assignment on the Module and
        # does NOT mark its parameters trainable.  Mark the head's
        # parameters explicitly (fresh layers default to True, so this is
        # also self-documenting).
        for params in self.model.classifier[6].parameters():
            params.requires_grad = True
        print("New Layers Added:")
        for params in self.model.parameters():
            if params.requires_grad:
                print(params.shape)

    def forward(self, x):
        """Run a batch through VGG16; returns (N, 7) log-probabilities.

        NOTE(review): VGG16's first conv expects 3-channel input — confirm
        the transform pipeline feeds 3 channels.
        """
        return self.model(x)
|
|
|
|
| 38 |
|
| 39 |
# Sample Helper function
|
| 40 |
def rgb2gray(image):
|
|
|
|
| 42 |
|
| 43 |
# Sample Transformation function
|
| 44 |
#YOUR CODE HERE for changing the Transformation values.
|
| 45 |
+
# Preprocessing pipeline applied to every face image before inference:
# grayscale conversion, resize to 100x100, tensor conversion, then
# normalization to roughly [-1, 1].
# NOTE(review): Normalize uses 3-channel stats (0.5, 0.5, 0.5), so this
# assumes rgb2gray still yields a 3-channel image; a single-channel
# tensor would make Normalize raise — confirm rgb2gray's output format.
trnscm = transforms.Compose([
    rgb2gray,
    transforms.Resize((100, 100)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
|
| 49 |
+
|