PavaniYerra committed on
Commit
1bf6e79
·
1 Parent(s): 8d09de7

FaceRecognition changes stage3

Browse files
app/Hackathon_setup/decision_tree_model.sav ADDED
Binary file (2.84 kB). View file
 
app/Hackathon_setup/face_recognition.py CHANGED
@@ -21,6 +21,19 @@ import pickle
21
 
22
  # Current_path stores absolute path of the file from where it runs.
23
  current_path = os.path.dirname(os.path.abspath(__file__))
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  #1) The below function is used to detect faces in the given image.
26
  #2) It returns only one image which has maximum area out of all the detected faces in the photo.
@@ -75,8 +88,35 @@ def get_similarity(img1, img2):
75
  # YOUR CODE HERE, load the model
76
 
77
  # YOUR CODE HERE, return similarity measure using your model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
 
79
- return 0
 
 
 
80
 
81
  #1) Image captured from mobile is passed as parameter to this function in the API call, It returns the face class in the string form ex: "Person1"
82
  #2) The image is passed to the function in base64 encoding, Code to decode the image provided within the function
@@ -93,4 +133,38 @@ def get_face_class(img1):
93
  ##YOUR CODE HERE, return face class here
94
  ##Hint: you need a classifier finetuned for your classes, it takes o/p of siamese as i/p to it
95
  ##Better Hint: Siamese experiment is covered in one of the labs
96
- return "YET TO BE CODED"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  # Current_path stores absolute path of the file from where it runs.
23
  current_path = os.path.dirname(os.path.abspath(__file__))
24
+ # --- GLOBAL SETUP: Must match your training transforms ---
25
+ # Define the transformation pipeline for inference
26
+ trnscm = transforms.Compose([
27
+ transforms.Grayscale(num_output_channels=1),
28
+ transforms.Resize((100, 100)),
29
+ transforms.ToTensor()
30
+ ])
31
+ CLASS_NAMES = ['Person0', 'Person1', 'Person2', 'Person3', 'Person4'] # ADJUST THIS!
32
+
33
+ # --- Model Filenames ---
34
+ SIAMESE_MODEL_PATH = current_path + '/siamese_model.t7'
35
+ KNN_CLASSIFIER_PATH = current_path + '/decision_tree_model.sav'
36
+ SCALER_PATH = current_path + '/face_recognition_scaler.sav'
37
 
38
  #1) The below function is used to detect faces in the given image.
39
  #2) It returns only one image which has maximum area out of all the detected faces in the photo.
 
88
  # YOUR CODE HERE, load the model
89
 
90
  # YOUR CODE HERE, return similarity measure using your model
91
+ # 1. Initialize and Load Siamese Network
92
+ try:
93
+ # Assuming your Siamese Network class is named 'SiameseNetwork'
94
+ siamese_net = SiameseNetwork().to(device)
95
+ siamese_net.load_state_dict(torch.load(SIAMESE_MODEL_PATH, map_location=device))
96
+ siamese_net.eval()
97
+ except Exception as e:
98
+ print(f"Error loading Siamese Model: {e}")
99
+ return -1 # Return error code
100
+
101
+ # 2. Get Features (Embeddings)
102
+ with torch.no_grad():
103
+ # Get the feature vector from one tower/forward_once method
104
+ # Ensure your SiameseNetwork class has a forward_once or get_embedding method
105
+ embed1 = siamese_net.forward_once(face1).cpu().numpy()
106
+ embed2 = siamese_net.forward_once(face2).cpu().numpy()
107
+
108
+ # 3. Calculate Similarity Measure
109
+ # The Euclidean distance is the fundamental metric used by the Triplet/Contrastive loss.
110
+ # We return the NEGATIVE Euclidean distance or COSINE similarity, as *higher* value usually means *more* similar.
111
+
112
+ # Option A: Euclidean Distance (Lower is better) -> return NEGATIVE distance for API expectation
113
+ # distance = euclidean_distances(embed1, embed2)[0][0]
114
+ # similarity = -distance
115
 
116
+ # Option B: Cosine Similarity (Higher is better) -> Recommended
117
+ similarity = cosine_similarity(embed1, embed2)[0][0]
118
+
119
+ return float(similarity)
120
 
121
  #1) Image captured from mobile is passed as parameter to this function in the API call, It returns the face class in the string form ex: "Person1"
122
  #2) The image is passed to the function in base64 encoding, Code to decode the image provided within the function
 
133
  ##YOUR CODE HERE, return face class here
134
  ##Hint: you need a classifier finetuned for your classes, it takes o/p of siamese as i/p to it
135
  ##Better Hint: Siamese experiment is covered in one of the labs
136
+ face1_tensor = trnscm(det_img1).unsqueeze(0).to(device)
137
+
138
+ # 1. Load Siamese Network (Feature Extractor)
139
+ try:
140
+ siamese_net = SiameseNetwork().to(device)
141
+ siamese_net.load_state_dict(torch.load(SIAMESE_MODEL_PATH, map_location=device))
142
+ siamese_net.eval()
143
+ except Exception as e:
144
+ return f"Error loading Siamese Model: {e}"
145
+
146
+ # 2. Extract Embedding
147
+ with torch.no_grad():
148
+ embedding_np = siamese_net.forward_once(face1_tensor).cpu().numpy()
149
+
150
+ # 3. Load Sklearn Scaler and Classifier (Joblib)
151
+ try:
152
+ knn_classifier = joblib.load(KNN_CLASSIFIER_PATH)
153
+ scaler = joblib.load(SCALER_PATH)
154
+ except Exception as e:
155
+ return f"Error loading Sklearn models: {e}"
156
+
157
+ # 4. Preprocess Embedding and Predict
158
+ # The embedding must be reshaped to (1, N_features) for the scaler
159
+ embedding_scaled = scaler.transform(embedding_np.reshape(1, -1))
160
+
161
+ # Perform prediction (returns a NumPy array with the predicted label index)
162
+ predicted_label_index = knn_classifier.predict(embedding_scaled)[0]
163
+
164
+ # 5. Map index to Class Name
165
+ if predicted_label_index < len(CLASS_NAMES):
166
+ predicted_class_name = CLASS_NAMES[predicted_label_index]
167
+ else:
168
+ predicted_class_name = "UNKNOWN_CLASS"
169
+
170
+ return predicted_class_name
app/Hackathon_setup/face_recognition_model.py CHANGED
@@ -53,5 +53,32 @@ def get_similarity(img1, img2):
53
  # YOUR CODE HERE, load the model
54
 
55
  # YOUR CODE HERE, return similarity measure using your model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
- return 0
 
53
  # YOUR CODE HERE, load the model
54
 
55
  # YOUR CODE HERE, return similarity measure using your model
56
+ # 1. Initialize and Load Siamese Network
57
+ try:
58
+ # Assuming your Siamese Network class is named 'SiameseNetwork'
59
+ siamese_net = SiameseNetwork().to(device)
60
+ siamese_net.load_state_dict(torch.load(SIAMESE_MODEL_PATH, map_location=device))
61
+ siamese_net.eval()
62
+ except Exception as e:
63
+ print(f"Error loading Siamese Model: {e}")
64
+ return -1 # Return error code
65
+
66
+ # 2. Get Features (Embeddings)
67
+ with torch.no_grad():
68
+ # Get the feature vector from one tower/forward_once method
69
+ # Ensure your SiameseNetwork class has a forward_once or get_embedding method
70
+ embed1 = siamese_net.forward_once(face1).cpu().numpy()
71
+ embed2 = siamese_net.forward_once(face2).cpu().numpy()
72
+
73
+ # 3. Calculate Similarity Measure
74
+ # The Euclidean distance is the fundamental metric used by the Triplet/Contrastive loss.
75
+ # We return the NEGATIVE Euclidean distance or COSINE similarity, as *higher* value usually means *more* similar.
76
+
77
+ # Option A: Euclidean Distance (Lower is better) -> return NEGATIVE distance for API expectation
78
+ # distance = euclidean_distances(embed1, embed2)[0][0]
79
+ # similarity = -distance
80
+
81
+ # Option B: Cosine Similarity (Higher is better) -> Recommended
82
+ similarity = cosine_similarity(embed1, embed2)[0][0]
83
 
84
+ return float(similarity)
app/Hackathon_setup/face_recognition_scaler.sav ADDED
Binary file (719 Bytes). View file