tsaddev committed on
Commit
57a9163
·
1 Parent(s): dc4ef92

Update app/Hackathon_setup/exp_recognition_model.py

Browse files
app/Hackathon_setup/exp_recognition_model.py CHANGED
@@ -2,10 +2,6 @@ import torch
2
  import torchvision
3
  import torch.nn as nn
4
  from torchvision import transforms
5
- import transformers
6
- from transformers.utils import logging
7
-
8
- transformers.logging.set_verbosity_info()
9
  ## Add more imports if required
10
 
11
  ####################################################################################################################
@@ -19,48 +15,45 @@ classes = {0: 'ANGER', 1: 'DISGUST', 2: 'FEAR', 3: 'HAPPINESS', 4: 'NEUTRAL', 5:
19
  # Example Network
20
class facExpRec(torch.nn.Module):
    """CNN for facial-expression recognition on single-channel images.

    With the default layer geometry a 48x48 grayscale input is reduced to a
    1x1 spatial map of 512 channels before the fully connected head.

    Args:
        out_features: number of expression classes to predict (default 7).
    """

    def __init__(self, out_features=7):
        super().__init__()

        # Convolutional feature extractor: 1 -> 64 -> 128 -> 512 -> 512 channels.
        self.conv1 = self.convlayer(in_channels=1, out_channels=64, kernel_size=3)
        self.conv2 = self.convlayer(in_channels=64, out_channels=128, kernel_size=5)
        self.conv3 = self.convlayer(in_channels=128, out_channels=512, kernel_size=3)
        # max_pool=1 keeps the spatial size unchanged (kernel-1 pooling is a no-op).
        self.conv4 = self.convlayer(in_channels=512, out_channels=512, kernel_size=3, max_pool=1)

        # Fully connected classifier head.
        self.fc1 = self.fclayer(512, 256)
        self.fc2 = self.fclayer(256, 512)
        self.fc3 = nn.Linear(512, out_features)

    def convlayer(self, in_channels, out_channels, kernel_size, max_pool=2):
        """Conv -> BatchNorm -> ReLU -> Dropout2d -> MaxPool building block."""
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=kernel_size, stride=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout2d(),
            nn.MaxPool2d(kernel_size=max_pool),
        )

    def fclayer(self, in_features, out_features):
        """Linear -> BatchNorm -> Dropout -> ReLU building block.

        Uses nn.Dropout rather than nn.Dropout1d: the input here is a 2-D
        (batch, features) tensor, which Dropout1d would misinterpret as
        (C, L) and zero entire rows across the whole batch.
        """
        return nn.Sequential(
            nn.Linear(in_features, out_features),
            nn.BatchNorm1d(out_features),
            nn.Dropout(0.4),  # fixed: was nn.Dropout1d, wrong for (N, C) input
            nn.ReLU(),
        )

    def forward(self, x):
        """Return class logits of shape (batch, out_features).

        Note: the original body called ``logger.info(x.shape)`` but no
        ``logger`` was ever defined in the module, so forward raised
        NameError at runtime; the calls are removed here.
        """
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = torch.flatten(x, 1)  # (N, 512, 1, 1) -> (N, 512); safe if non-contiguous
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
64
 
65
  # Sample Helper function
66
  def rgb2gray(image):
 
2
  import torchvision
3
  import torch.nn as nn
4
  from torchvision import transforms
 
 
 
 
5
  ## Add more imports if required
6
 
7
  ####################################################################################################################
 
15
  # Example Network
16
class facExpRec(torch.nn.Module):
    """CNN for facial-expression recognition on single-channel images.

    With the default layer geometry a 48x48 grayscale input collapses to a
    1x1 spatial map of 512 channels ahead of the classifier head.

    Args:
        out_features: number of expression classes to predict (default 7).
    """

    def __init__(self, out_features=7):
        super().__init__()

        # Convolutional feature extractor: 1 -> 64 -> 128 -> 512 -> 512 channels.
        self.conv1 = self.convlayer(in_channels=1, out_channels=64, kernel_size=3)
        self.conv2 = self.convlayer(in_channels=64, out_channels=128, kernel_size=5)
        self.conv3 = self.convlayer(in_channels=128, out_channels=512, kernel_size=3)
        # max_pool=1 keeps the spatial size unchanged (kernel-1 pooling is a no-op).
        self.conv4 = self.convlayer(in_channels=512, out_channels=512, kernel_size=3, max_pool=1)

        # Fully connected classifier head.
        self.fc1 = self.fclayer(512, 256)
        self.fc2 = self.fclayer(256, 512)
        self.fc3 = nn.Linear(512, out_features)

    def convlayer(self, in_channels, out_channels, kernel_size, max_pool=2):
        """Conv -> BatchNorm -> ReLU -> Dropout2d -> MaxPool building block."""
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=kernel_size, stride=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout2d(),
            nn.MaxPool2d(kernel_size=max_pool),
        )

    def fclayer(self, in_features, out_features):
        """Linear -> BatchNorm -> Dropout -> ReLU building block.

        Uses nn.Dropout rather than nn.Dropout1d: the input here is a 2-D
        (batch, features) tensor, which Dropout1d would misinterpret as
        (C, L) and zero entire rows across the whole batch.
        """
        return nn.Sequential(
            nn.Linear(in_features, out_features),
            nn.BatchNorm1d(out_features),
            nn.Dropout(0.4),  # fixed: was nn.Dropout1d, wrong for (N, C) input
            nn.ReLU(),
        )

    def forward(self, x):
        """Return class logits of shape (batch, out_features)."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = torch.flatten(x, 1)  # (N, 512, 1, 1) -> (N, 512); safe if non-contiguous
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
 
 
 
57
 
58
  # Sample Helper function
59
  def rgb2gray(image):