vrvundyala commited on
Commit
fdf56ae
·
1 Parent(s): bf73f6c

face recognition

Browse files
app/Hackathon_setup/face_recognition.py CHANGED
@@ -102,63 +102,7 @@ def get_similarity(img1, img2):
102
  #4) Perform necessary transformations to the input(detected face using the above function).
103
  #5) Along with the siamese, you need the classifier as well, which is to be finetuned with the faces that you are training
104
  ##Caution: Don't change the definition or function name; for loading the model use the current_path for path example is given in comments to the function
105
- # def get_face_class(img1):
106
- # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
107
- # BASE_DIR = os.path.dirname(os.path.abspath(__file__))
108
- #
109
- # # 1 Load the trained classifier (decision-tree path kept commented below)
110
- # # clf_path = os.path.join(BASE_DIR, "decision_tree_model.sav")
111
- # clf_path = os.path.join(BASE_DIR, "logistic_regression_5.sav")
112
- # clf = joblib.load(clf_path)
113
- #
114
- # scaler_path = os.path.join(BASE_DIR, "standar_scaler.sav")
115
- # scaler = joblib.load(scaler_path)
116
- #
117
- # # 2 Load the Siamese feature extractor
118
- # myModel = Siamese().to(device)
119
- # ckpt_path = os.path.join(BASE_DIR, "siamese_model.t7")
120
- # ckpt = torch.load(ckpt_path, map_location=device)
121
- # myModel.load_state_dict(ckpt['net_dict'])
122
- # myModel.eval()
123
- #
124
- # # 3 Face detection (if available)
125
- # # det_img1 = detected_face(img1) # returns cropped face or 0 if not detected
126
- # # if det_img1 == 0:
127
- # # fallback: use original image
128
- # # det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
129
- #
130
- # # 4 Transform the face
131
- # img_tensor = trnscm(img1).unsqueeze(0)
132
- #
133
- # # 5 Extract embeddings
134
- # with torch.no_grad():
135
- # embedding = myModel.forward_once(img_tensor)
136
- # embedding = embedding.view(embedding.size(0), -1).cpu().numpy() # shape (1, embedding_dim)
137
- #
138
- # # 6 Predict class using the loaded classifier (logistic regression)
139
- # pred_label = clf.predict(scaler.transform(embedding))[0]
140
- #
141
- #
142
- # # --- Predict ---
143
- # # scaled_emb = scaler.transform(embedding)
144
- # # probs = clf.predict_proba(scaled_emb)
145
- # # pred_label = np.argmax(probs)
146
- # # confidence = probs[0, pred_label]
147
- #
148
- #
149
- #
150
- # # 7 Optional: return class name (if available)
151
- # # If you have the dataset available:
152
- # # class_names = finalClassifierDset.classes
153
- # # return class_names[pred_label]
154
- # # class_names = ['Aayush', 'Aditya', 'Vikram']
155
- # # return class_names[pred_label] + " " + str(pred_label)
156
- # class_names = ['Aayush', 'Aditya', 'Vikram']
157
- # return f"{class_names[pred_label]} {pred_label}"
158
-
159
-
160
  def get_face_class(img1):
161
-
162
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
163
  BASE_DIR = os.path.dirname(os.path.abspath(__file__))
164
 
@@ -177,18 +121,75 @@ def get_face_class(img1):
177
  myModel.load_state_dict(ckpt['net_dict'])
178
  myModel.eval()
179
  myModel = myModel.float()
 
 
 
 
 
180
 
181
- img_tensor = transform1(img1).unsqueeze(0).to(device).float()
 
182
 
 
183
  with torch.no_grad():
184
  embedding = myModel.forward_once(img_tensor)
185
- embedding = embedding.view(embedding.size(0), -1).cpu().numpy() # shape (1, embedding_dim)
 
 
186
  pred_label = clf.predict(scaler.transform(embedding))[0]
187
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188
  class_names = ['Aayush', 'Aditya', 'Vikram']
189
  return f"{class_names[pred_label]} {pred_label} {embedding}"
190
 
191
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
192
  # def get_face_class(img1):
193
  # """
194
  # img1: BGR image as numpy array (from cv2) OR path string accepted by detected_face.
 
102
  #4) Perform necessary transformations to the input(detected face using the above function).
103
  #5) Along with the siamese, you need the classifier as well, which is to be finetuned with the faces that you are training
104
  ##Caution: Don't change the definition or function name; for loading the model use the current_path for path example is given in comments to the function
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
  def get_face_class(img1):
 
106
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
107
  BASE_DIR = os.path.dirname(os.path.abspath(__file__))
108
 
 
121
  myModel.load_state_dict(ckpt['net_dict'])
122
  myModel.eval()
123
  myModel = myModel.float()
124
+ # 3 Face detection (if available)
125
+ det_img1 = detected_face(img1) # returns cropped face or 0 if not detected
126
+ if det_img1 == 0:
127
+ # fallback: use original image
128
+ det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
129
 
130
+ # 4 Transform the face
131
+ img_tensor = trnscm(det_img1).unsqueeze(0).float()
132
 
133
+ # 5 Extract embeddings
134
  with torch.no_grad():
135
  embedding = myModel.forward_once(img_tensor)
136
+ embedding = embedding.view(embedding.size(0), -1).cpu().numpy() # shape (1, embedding_dim)
137
+
138
+ # 6 Predict class using the loaded classifier
139
  pred_label = clf.predict(scaler.transform(embedding))[0]
140
 
141
+
142
+ # --- Predict ---
143
+ # scaled_emb = scaler.transform(embedding)
144
+ # probs = clf.predict_proba(scaled_emb)
145
+ # pred_label = np.argmax(probs)
146
+ # confidence = probs[0, pred_label]
147
+
148
+
149
+
150
+ # 7 Optional: return class name (if available)
151
+ # If you have the dataset available:
152
+ # class_names = finalClassifierDset.classes
153
+ # return class_names[pred_label]
154
+ # class_names = ['Aayush', 'Aditya', 'Vikram']
155
+ # return class_names[pred_label] + " " + str(pred_label)
156
  class_names = ['Aayush', 'Aditya', 'Vikram']
157
  return f"{class_names[pred_label]} {pred_label} {embedding}"
158
 
159
 
160
+ # def get_face_class(img1):
161
+ #
162
+ # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
163
+ # BASE_DIR = os.path.dirname(os.path.abspath(__file__))
164
+ #
165
+ # # 1 Load the trained classifier (SVC)
166
+ # # clf_path = os.path.join(BASE_DIR, "decision_tree_model.sav")
167
+ # clf_path = os.path.join(BASE_DIR, "SVC_3.sav")
168
+ # clf = joblib.load(clf_path)
169
+ #
170
+ # scaler_path = os.path.join(BASE_DIR, "scaler.joblib")
171
+ # scaler = joblib.load(scaler_path)
172
+ #
173
+ # # 2 Load the Siamese feature extractor
174
+ # myModel = Siamese().to(device)
175
+ # ckpt_path = os.path.join(BASE_DIR, "siamese_model.t7")
176
+ # ckpt = torch.load(ckpt_path, map_location=device)
177
+ #
178
+ # myModel.load_state_dict(ckpt['net_dict'])
179
+ # myModel.eval()
180
+ # myModel = myModel.float()
181
+ #
182
+ # img_tensor = transform1(img1).unsqueeze(0).to(device).float()
183
+ #
184
+ # with torch.no_grad():
185
+ # embedding = myModel.forward_once(img_tensor)
186
+ # embedding = embedding.view(embedding.size(0), -1).cpu().numpy() # shape (1, embedding_dim)
187
+ # pred_label = clf.predict(scaler.transform(embedding))[0]
188
+ #
189
+ # class_names = ['Aayush', 'Aditya', 'Vikram']
190
+ # return f"{class_names[pred_label]} {pred_label} {embedding}"
191
+
192
+
193
  # def get_face_class(img1):
194
  # """
195
  # img1: BGR image as numpy array (from cv2) OR path string accepted by detected_face.
app/main.py CHANGED
@@ -91,7 +91,7 @@ async def create_upload_files(request: Request, file3: UploadFile = File(...)):
91
  face_rec_filename = 'app/static/' + file3.filename
92
  with open(face_rec_filename, 'wb') as f:
93
  f.write(contents)
94
-
95
  img1 = Image.open(face_rec_filename)
96
  # img1 = np.array(img1).reshape(img1.size[1], img1.size[0], 3).astype(np.uint8)
97
 
 
91
  face_rec_filename = 'app/static/' + file3.filename
92
  with open(face_rec_filename, 'wb') as f:
93
  f.write(contents)
94
+ print(face_rec_filename)
95
  img1 = Image.open(face_rec_filename)
96
  # img1 = np.array(img1).reshape(img1.size[1], img1.size[0], 3).astype(np.uint8)
97