Spaces:
Sleeping
Sleeping
Update app/Hackathon_setup/face_recognition_model.py
Browse files
app/Hackathon_setup/face_recognition_model.py
CHANGED
|
@@ -12,13 +12,56 @@ trnscm = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()
|
|
| 12 |
|
| 13 |
##Example Network
|
| 14 |
class Siamese(torch.nn.Module):
|
| 15 |
-
def
|
| 16 |
-
super(Siamese, self).
|
| 17 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
|
| 23 |
##########################################################################################################
|
| 24 |
## Sample classification network (Specify if you are using a pytorch classifier during the training) ##
|
|
|
|
| 12 |
|
| 13 |
##Example Network
|
| 14 |
class Siamese(torch.nn.Module):
    """Siamese convolutional network that maps a 1x100x100 face image to an
    L2-normalized embedding vector, suitable for cosine/Euclidean similarity.

    Architecture: four Conv2d+BatchNorm+ReLU+MaxPool stages
    (1 -> 32 -> 64 -> 128 -> 256 channels), then two fully connected layers
    projecting the flattened features to ``embedding_dim``.
    """

    # BUG FIX: the original defined `_init_` (single underscores, a
    # Markdown-stripping artifact), so Python never ran this constructor and
    # nn.Module.__init__ was never called — instantiation would fail.
    def __init__(self, embedding_dim=128):
        super().__init__()

        # Convolutional layers for feature extraction
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1)

        # Batch normalization layers (one per conv stage)
        self.bn1 = nn.BatchNorm2d(32)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(128)
        self.bn4 = nn.BatchNorm2d(256)

        # Shared 2x2 max-pooling layer (halves spatial size each stage)
        self.pool = nn.MaxPool2d(2, 2)

        # Fully connected layers for the embedding head.
        # 100 -> 50 -> 25 -> 12 -> 6 after four stride-2 pools, hence 256*6*6.
        self.fc1 = nn.Linear(256 * 6 * 6, 512)
        self.fc2 = nn.Linear(512, embedding_dim)

        # Dropout for regularization
        self.dropout = nn.Dropout(0.5)

    def forward_once(self, x):
        """Embed a single batch of images.

        Args:
            x: tensor of shape (N, 1, 100, 100).

        Returns:
            L2-normalized embeddings of shape (N, embedding_dim).
        """
        x = self.pool(F.relu(self.bn1(self.conv1(x))))
        x = self.pool(F.relu(self.bn2(self.conv2(x))))
        x = self.pool(F.relu(self.bn3(self.conv3(x))))
        x = self.pool(F.relu(self.bn4(self.conv4(x))))

        # Flatten the feature maps
        x = x.view(x.size(0), -1)

        # Fully connected layers
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)

        # L2 normalize the embeddings for better cosine similarity
        x = F.normalize(x, p=2, dim=1)

        return x

    def forward(self, x1, x2):
        """Siamese forward pass: embed both inputs with shared weights.

        Returns:
            Tuple ``(output1, output2)`` of L2-normalized embeddings.
        """
        output1 = self.forward_once(x1)
        output2 = self.forward_once(x2)
        return output1, output2
| 65 |
|
| 66 |
##########################################################################################################
|
| 67 |
## Sample classification network (Specify if you are using a pytorch classifier during the training) ##
|