tsaddev committed on
Commit
10057c4
·
1 Parent(s): 02ddc2f

Upload 26 files

Browse files
README.md CHANGED
@@ -6,8 +6,6 @@ colorTo: pink
6
  sdk: docker
7
  pinned: false
8
  license: mit
9
- app_port: 8001
10
  ---
11
 
12
-
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
6
  sdk: docker
7
  pinned: false
8
  license: mit
 
9
  ---
10
 
 
11
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app/Hackathon_setup/exp_recognition.py CHANGED
@@ -67,5 +67,9 @@ def get_expression(img):
67
  face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
68
 
69
  # YOUR CODE HERE, return expression using your model
70
-
71
- return "YET TO BE CODED"
 
 
 
 
 
67
  face = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
68
 
69
  # YOUR CODE HERE, return expression using your model
70
+ face_det_net = facExpRec()
71
+ model = torch.load(current_path + '/exp_recognition_net.t7', map_location=device)
72
+ face_det_net.load_state_dict(model['net_dict'])
73
+ output = face_det_net(trnscm(face))
74
+ _, pred = torch.max(output)
75
+ return classes[pred]
app/Hackathon_setup/exp_recognition_model.py CHANGED
@@ -14,13 +14,46 @@ classes = {0: 'ANGER', 1: 'DISGUST', 2: 'FEAR', 3: 'HAPPINESS', 4: 'NEUTRAL', 5:
14
 
15
  # Example Network
16
  class facExpRec(torch.nn.Module):
17
- def __init__(self):
18
- pass # remove 'pass' once you have written your code
19
- #YOUR CODE HERE
20
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  def forward(self, x):
22
- pass # remove 'pass' once you have written your code
23
- #YOUR CODE HERE
 
 
 
 
 
 
 
24
 
25
  # Sample Helper function
26
  def rgb2gray(image):
@@ -28,4 +61,4 @@ def rgb2gray(image):
28
 
29
  # Sample Transformation function
30
  #YOUR CODE HERE for changing the Transformation values.
31
- trnscm = transforms.Compose([rgb2gray, transforms.Resize((48,48)), transforms.ToTensor()])
 
14
 
15
# Example Network
class facExpRec(torch.nn.Module):
    """CNN for facial-expression recognition on 1x48x48 grayscale input.

    Four conv blocks (Conv2d -> BatchNorm2d -> ReLU -> Dropout2d -> MaxPool2d)
    reduce a 48x48 input to a 512-dim feature vector, followed by two FC
    blocks and a final linear classifier.

    Args:
        out_features: number of expression classes (default 7).
    """

    def __init__(self, out_features=7):
        super().__init__()

        # With these kernel/pool sizes a 48x48 input shrinks 46->23, 19->9,
        # 7->3, then 1->1, leaving one 512-channel 1x1 feature map.
        self.conv1 = self.convlayer(in_channels=1, out_channels=64, kernel_size=3)
        self.conv2 = self.convlayer(in_channels=64, out_channels=128, kernel_size=5)
        self.conv3 = self.convlayer(in_channels=128, out_channels=512, kernel_size=3)
        self.conv4 = self.convlayer(in_channels=512, out_channels=512, kernel_size=3, max_pool=1)

        self.fc1 = self.fclayer(512, 256)
        self.fc2 = self.fclayer(256, 512)
        self.fc3 = nn.Linear(512, out_features)

    def convlayer(self, in_channels, out_channels, kernel_size, max_pool=2):
        """One conv block: Conv2d -> BatchNorm2d -> ReLU -> Dropout2d -> MaxPool2d."""
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout2d(),
            nn.MaxPool2d(kernel_size=max_pool),
        )

    def fclayer(self, in_features, out_features):
        """One FC block: Linear -> BatchNorm1d -> Dropout -> ReLU."""
        return nn.Sequential(
            nn.Linear(in_features, out_features),
            nn.BatchNorm1d(out_features),
            # BUG FIX: nn.Dropout1d expects (N, C, L) input and misreads the
            # (N, features) activations here as an unbatched (C, L) tensor,
            # zeroing entire samples; element-wise nn.Dropout is correct.
            # (No parameters involved, so saved state_dicts still load.)
            nn.Dropout(0.4),
            nn.ReLU(),
        )

    def forward(self, x):
        """Return raw class logits of shape (N, out_features) for input (N, 1, 48, 48)."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        # BUG FIX: flatten per sample rather than view(-1, 512), which would
        # silently merge samples if the spatial size were ever not 1x1.
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
57
 
58
  # Sample Helper function
59
  def rgb2gray(image):
 
61
 
62
# Sample Transformation function
#YOUR CODE HERE for changing the Transformation values.
# Grayscale -> 48x48 -> tensor in [0, 1] -> normalised to roughly [-1, 1].
# BUG FIX: Normalize's mean/std are per-channel sequences — (0.5) is just the
# float 0.5, so use one-element tuples (0.5,) for the single grayscale channel.
trnscm = transforms.Compose([rgb2gray, transforms.Resize((48,48)), transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
exp_recognition_net.t7 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eabf0a7bedebe56bb37aecc6908bf665898203a3db6b476579f313df4889cf40
3
+ size 13735490