# app/Hackathon_setup/face_recognition_model.py
# NOTE: removed non-Python residue pasted from the repository web viewer
# (path breadcrumb, author line, commit hash) that would break import.
import math
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
# Add more imports if required
# Preprocessing applied to every face image before it enters the network:
# resize to the fixed 100x100 spatial size the Siamese CNN expects, then
# convert the PIL image to a float tensor scaled to [0, 1].
# NOTE(review): no grayscale transform here, so callers must already supply
# single-channel images (cnn1 expects 1 input channel) — confirm upstream.
trnscm = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()])
##Example Network
class Siamese(torch.nn.Module):
    """Siamese network: one shared CNN tower plus a fully connected head
    that maps each 100x100 single-channel face image to a 5-dimensional
    embedding. ``forward`` embeds an image pair with the shared weights.
    """

    def __init__(self):
        super(Siamese, self).__init__()
        # Three identical conv stages; ReflectionPad2d(1) followed by a
        # 3x3 conv keeps the spatial size at 100x100 throughout, so the
        # head's input dimension below is exact.
        stages = []
        for in_ch, out_ch in ((1, 4), (4, 8), (8, 8)):
            stages += [
                nn.ReflectionPad2d(1),  # reflection padding instead of zero padding
                nn.Conv2d(in_ch, out_ch, kernel_size=3),
                nn.ReLU(inplace=True),
                nn.BatchNorm2d(out_ch),
            ]
        self.cnn1 = nn.Sequential(*stages)
        # MLP head over the flattened 8x100x100 feature map -> 5-dim embedding.
        self.fc1 = nn.Sequential(
            nn.Linear(8 * 100 * 100, 500),
            nn.ReLU(inplace=True),
            nn.Linear(500, 500),
            nn.ReLU(inplace=True),
            nn.Linear(500, 5),
        )

    def forward_once(self, x):
        """Embed one batch of images: conv tower, flatten, then the MLP head."""
        features = self.cnn1(x)
        flat = features.view(features.size(0), -1)
        return self.fc1(flat)

    def forward(self, input1, input2):
        """Embed both inputs through the shared tower; return both embeddings."""
        return self.forward_once(input1), self.forward_once(input2)
##########################################################################################################
## Sample classification network (Specify if you are using a pytorch classifier during the training) ##
## classifier = nn.Sequential(nn.Linear(64, 64), nn.BatchNorm1d(64), nn.ReLU(), nn.Linear...) ##
##########################################################################################################
# YOUR CODE HERE for pytorch classifier
# Class names as a list; index i is the label for the classifier's output i.
classes = ['person1','person2','person3','person4','person5','person6','person7']
def get_similarity(img1, img2):
    """Return the cosine similarity between the Siamese embeddings of two
    face images (higher means more similar), or -1 on model-load failure.

    img1, img2: OpenCV-style BGR numpy arrays, each containing one face.
    Returns a float in [-1, 1], or -1 (error sentinel) when the Siamese
    weights cannot be loaded.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Crop the face from each frame. detected_face is defined elsewhere in
    # this package; presumably it returns 0 when no face is found — confirm.
    det_img1 = detected_face(img1)
    det_img2 = detected_face(img2)
    # Fall back to the grayscale full frame only for the image whose
    # detection failed. (BUGFIX: the original replaced BOTH images when
    # either single detection failed, discarding a good crop.)
    if det_img1 == 0:
        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
    if det_img2 == 0:
        det_img2 = Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))

    # BUGFIX: move the inputs to the same device as the model; the original
    # left them on CPU, which raises a device-mismatch error under CUDA.
    face1 = trnscm(det_img1).unsqueeze(0).to(device)
    face2 = trnscm(det_img2).unsqueeze(0).to(device)

    # Load the trained Siamese weights. NOTE(review): SIAMESE_MODEL_PATH is
    # assumed to be defined at module level — confirm it is set elsewhere.
    try:
        siamese_net = Siamese().to(device)
        siamese_net.load_state_dict(torch.load(SIAMESE_MODEL_PATH, map_location=device))
        siamese_net.eval()
    except Exception as e:
        print(f"Error loading Siamese Model: {e}")
        return -1  # Return error code

    # Embed both faces with the shared tower; no gradients needed at inference.
    with torch.no_grad():
        embed1 = siamese_net.forward_once(face1)
        embed2 = siamese_net.forward_once(face2)

    # Cosine similarity (higher is more similar), computed with torch.
    # (BUGFIX: the original called cosine_similarity from sklearn, which was
    # never imported anywhere in this file — a guaranteed NameError.)
    similarity = F.cosine_similarity(embed1, embed2).item()
    return float(similarity)
def get_face_class(img1):
    """Classify the face in a BGR image.

    Pipeline: detect/crop the face, embed it with the trained Siamese
    network, scale the embedding, and predict with the saved KNN
    classifier. Returns the predicted class name from ``classes``, the
    string "UNKNOWN_CLASS" for an out-of-range label, or an error string
    when a model artifact fails to load.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Crop the face; fall back to the grayscale full frame when detection
    # fails (detected_face presumably returns 0 on failure — confirm).
    det_img1 = detected_face(img1)
    if det_img1 == 0:
        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
    face1_tensor = trnscm(det_img1).unsqueeze(0).to(device)

    # 1. Load the Siamese feature extractor. NOTE(review): SIAMESE_MODEL_PATH
    # is assumed to be defined at module level — confirm it is set elsewhere.
    try:
        siamese_net = Siamese().to(device)
        siamese_net.load_state_dict(torch.load(SIAMESE_MODEL_PATH, map_location=device))
        siamese_net.eval()
    except Exception as e:
        return f"Error loading Siamese Model get_face_class: {e}"

    # 2. Extract the embedding (no gradients needed at inference).
    with torch.no_grad():
        embedding_np = siamese_net.forward_once(face1_tensor).cpu().numpy()

    # 3. Load the sklearn scaler and KNN classifier. NOTE(review): joblib,
    # KNN_CLASSIFIER_PATH and SCALER_PATH must be provided at module level.
    try:
        knn_classifier = joblib.load(KNN_CLASSIFIER_PATH)
        scaler = joblib.load(SCALER_PATH)
    except Exception as e:
        return f"Error loading Sklearn models: {e}"

    # 4. Scale to the training distribution; the scaler expects (1, N_features).
    embedding_scaled = scaler.transform(embedding_np.reshape(1, -1))
    predicted_label_index = knn_classifier.predict(embedding_scaled)[0]

    # 5. Map the numeric label onto the module-level `classes` list.
    # (BUGFIX: the original referenced undefined CLASS_NAMES, which raised
    # NameError on every successful prediction.) Assumes the classifier was
    # trained on integer labels — TODO confirm it does not emit strings.
    if predicted_label_index < len(classes):
        return classes[predicted_label_index]
    return "UNKNOWN_CLASS"