Tejaswini2403 committed on
Commit
98c7fa2
·
1 Parent(s): b803b9a

second commit

Browse files
.gitattributes CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  app/Hackathon_setup/siamese_model.t7 filter=lfs diff=lfs merge=lfs -text
 
 
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  app/Hackathon_setup/siamese_model.t7 filter=lfs diff=lfs merge=lfs -text
37
+ app/Hackathon_setup/exp_recognition_net.t7 filter=lfs diff=lfs merge=lfs -text
app/Hackathon_setup/exp_recognition.py CHANGED
@@ -61,11 +61,16 @@ def get_expression(img):
61
  ##the same path as this file, we recommend to put in the same directory ##
62
  ##########################################################################################
63
  ##########################################################################################
64
-
 
 
 
65
  face = detected_face(img)
66
  if face==0:
67
  face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
68
 
 
 
69
  # YOUR CODE HERE, return expression using your model
70
 
71
- return "YET TO BE CODED"
 
61
  ##the same path as this file, we recommend to put in the same directory ##
62
  ##########################################################################################
63
  ##########################################################################################
64
+
65
+ face_det_net = facExpRec()
66
+ model = torch.load(current_path + '/exp_recognition_net.t7', map_location=device)
67
+ face_det_net.load_state_dict(model['net_dict'])
68
  face = detected_face(img)
69
  if face==0:
70
  face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
71
 
72
+ output = face_det_net(face)
73
+
74
  # YOUR CODE HERE, return expression using your model
75
 
76
+ return output
app/Hackathon_setup/exp_recognition_model.py CHANGED
@@ -15,12 +15,34 @@ classes = {0: 'ANGER', 1: 'DISGUST', 2: 'FEAR', 3: 'HAPPINESS', 4: 'NEUTRAL', 5:
15
  # Example Network
16
  class facExpRec(torch.nn.Module):
17
  def __init__(self):
18
- pass # remove 'pass' once you have written your code
19
- #YOUR CODE HERE
20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  def forward(self, x):
22
- pass # remove 'pass' once you have written your code
23
- #YOUR CODE HERE
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  # Sample Helper function
26
  def rgb2gray(image):
 
15
  # Example Network
16
class facExpRec(torch.nn.Module):
    """CNN for 7-class facial expression recognition.

    Takes a single-channel (grayscale) image batch of shape
    (N, 1, H, W) and returns log-probabilities of shape (N, 7)
    (one row per image, classes per the module-level ``classes`` map).

    Six conv stages — each followed by ELU and 2x2 max-pooling — grow
    the channel depth 1 -> 1024 while halving the spatial extent, then
    four fully-connected layers reduce 1024 features to 7 logits.
    NOTE(review): the `fc1` input of 1024*1*1 implies the feature map is
    1x1 after the last pool, which holds for ~96x96 inputs — confirm the
    preprocessing pipeline's image size.
    """

    def __init__(self):
        # BUG FIX: the original omitted super().__init__(); without it,
        # assigning any nn.Module attribute below raises
        # "AttributeError: cannot assign module before Module.__init__() call".
        super(facExpRec, self).__init__()

        # Attribute names must stay exactly as-is: the saved checkpoint's
        # state_dict keys (conv1.weight, fc1.bias, ...) depend on them.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3)
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=64, kernel_size=3)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3)
        # 1x1 convs: deepen channels without shrinking the feature map.
        self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=1)
        self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=1)
        self.conv6 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=1)

        self.fc1 = nn.Linear(1024 * 1 * 1, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        self.fc4 = nn.Linear(64, 7)  # 7 expression classes

        # Shared (stateless) 2x2 pooling layer reused after every conv.
        self.pool = nn.MaxPool2d(kernel_size=2)

    def forward(self, x):
        """Run the network; returns log-softmax scores of shape (N, 7)."""
        x = self.pool(F.elu(self.conv1(x)))
        x = self.pool(F.elu(self.conv2(x)))
        x = self.pool(F.elu(self.conv3(x)))
        x = self.pool(F.elu(self.conv4(x)))
        x = self.pool(F.elu(self.conv5(x)))
        x = self.pool(F.elu(self.conv6(x)))
        # Flatten (N, 1024, 1, 1) -> (N, 1024) for the classifier head.
        x = x.view(-1, 1024 * 1 * 1)
        x = F.elu(self.fc1(x))
        x = F.elu(self.fc2(x))
        x = F.elu(self.fc3(x))
        x = self.fc4(x)
        # Log-probabilities; pair with nn.NLLLoss during training.
        x = F.log_softmax(x, dim=1)
        return x
46
 
47
  # Sample Helper function
48
  def rgb2gray(image):
app/Hackathon_setup/exp_recognition_net.t7 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03b6c535a608d66cca9a4e794ce5186c51b3b52b148254023b1c631ca71e36b2
3
+ size 4314282
app/Hackathon_setup/face_recognition.py CHANGED
@@ -99,7 +99,5 @@ def get_face_class(img1):
99
  det_img1 = detected_face(img1)
100
  if(det_img1 == 0):
101
  det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
102
- ##YOUR CODE HERE, return face class here
103
- ##Hint: you need a classifier finetuned for your classes, it takes o/p of siamese as i/p to it
104
- ##Better Hint: Siamese experiment is covered in one of the labs
105
  return "YET TO BE CODED"
 
99
  det_img1 = detected_face(img1)
100
  if(det_img1 == 0):
101
  det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
102
+
 
 
103
  return "YET TO BE CODED"