Surya152002 committed on
Commit
3deb83f
·
1 Parent(s): 9d4f7a3

Update streamlit_app.py

Browse files
Files changed (1) hide show
  1. streamlit_app.py +223 -3
streamlit_app.py CHANGED
@@ -2,13 +2,230 @@ import streamlit as st
2
  import pandas as pd
3
  import cv2
4
  import base64
5
- import os
6
  import numpy as np
7
  import datetime
8
  import csv
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
- # Your existing Python code for face recognition, etc.
11
- # ...
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  # Function to check login credentials (Dummy function, replace with real logic)
14
  def check_login(username, password):
@@ -44,6 +261,9 @@ def main():
44
  recognition(face_image, i)
45
  st.image(opencv_image, channels="BGR", caption="Processed Image")
46
 
 
 
 
47
  if 'recognized_names' in globals():
48
  # Show Attendance Table
49
  if recognized_names:
 
2
  import pandas as pd
3
  import cv2
4
  import base64
 
5
  import numpy as np
6
  import datetime
7
  import csv
8
+ import torch
9
+ from torchvision import transforms
10
+ import sys
11
+ import os
12
+
13
+ #pytorch
14
+ from concurrent.futures import thread
15
+ from sqlalchemy import null
16
+ import torch
17
+ from torchvision import transforms
18
+ import time
19
+ from threading import Thread
20
+
21
+ #other lib
22
+ import sys
23
+ import numpy as np
24
+ import os
25
+ import cv2
26
+ import csv
27
+ import datetime
28
+
29
# --- Module-level initialization: runs once at import time. ---
# Make the vendored yolov5-face repo importable before pulling in its modules.
sys.path.insert(0, "yolov5_face")
from models.experimental import attempt_load
from utils.datasets import letterbox
from utils.general import check_img_size, non_max_suppression_face, scale_coords

# Check device
# CPU-only inference; no CUDA device is ever selected here.
device = torch.device("cpu")

# Get model detect
# Face *detector* (yolov5-face). Two weight options; the medium model is active.
## Case 1:
# model = attempt_load("yolov5_face/yolov5s-face.pt", map_location=device)

## Case 2:
model = attempt_load("yolov5_face/yolov5m-face.pt", map_location=device)

# Get model recognition
# Face *embedding* model (InsightFace iresnet backbone). The resnet100
# variant is active; resnet18 is the lighter alternative below.
## Case 1:
from insightface.insight_face import iresnet100
weight = torch.load("insightface/resnet100_backbone.pth", map_location = device)
model_emb = iresnet100()

## Case 2:
#from insightface.insight_face import iresnet18
#weight = torch.load("insightface/resnet18_backbone.pth", map_location = device)
#model_emb = iresnet18()

model_emb.load_state_dict(weight)
model_emb.to(device)
model_emb.eval()  # inference mode: disables dropout/batch-norm updates
detected_faces = []  # NOTE(review): never appended to in this file — possibly dead state

# Preprocessing pipeline applied to each face crop before embedding:
# to tensor (scales to [0,1]), resize to the 112x112 input the backbone
# expects, then normalize each channel to roughly [-1, 1].
face_preprocess = transforms.Compose([
    transforms.ToTensor(), # input PIL => (3,56,56), /255.0
    transforms.Resize((112, 112)),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])

isThread = True  # NOTE(review): not read anywhere in this file — confirm before removing
score = 0
# NOTE(review): `null` is SQLAlchemy's SQL NULL construct, not Python None —
# an odd choice for a plain placeholder; confirm nothing relies on it.
name = null

# Attendance log written by `recognition`; names recorded once per session.
csv_filename = "recognized_faces.csv"
recognized_names = []
72
def resize_image(img0, img_size):
    """Scale ``img0`` so its longest side is ``img_size``, letterbox it to a
    stride-compatible shape, and return a normalized CHW float tensor."""
    orig_h, orig_w = img0.shape[:2]
    scale = img_size / max(orig_h, orig_w)

    # Resize only when the target size actually differs from the original.
    if scale != 1:
        mode = cv2.INTER_AREA if scale < 1 else cv2.INTER_LINEAR
        img0 = cv2.resize(img0, (int(orig_w * scale), int(orig_h * scale)),
                          interpolation=mode)

    # Pad to a square the detector's stride divides evenly.
    target = check_img_size(img_size, s=model.stride.max())
    padded = letterbox(img0, new_shape=target)[0]

    # HWC/BGR -> CHW/RGB, then a float tensor scaled to [0, 1].
    chw = padded[:, :, ::-1].transpose(2, 0, 1).copy()
    tensor = torch.from_numpy(chw).to(device).float()
    tensor /= 255.0

    return tensor
92
+
93
def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
    """Rescale 5-point landmark coords (xy pairs, in place) from the
    letterboxed shape ``img1_shape`` back to the original ``img0_shape``,
    clamping every point to the image bounds. Returns ``coords``."""
    if ratio_pad is None:
        # Recover the letterbox gain and padding from the two shapes.
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])
        pad_x = (img1_shape[1] - img0_shape[1] * gain) / 2
        pad_y = (img1_shape[0] - img0_shape[0] * gain) / 2
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]
        pad_x, pad_y = pad[0], pad[1]

    # x coordinates live in even columns, y coordinates in odd columns.
    x_cols = [0, 2, 4, 6, 8]
    y_cols = [1, 3, 5, 7, 9]

    coords[:, x_cols] -= pad_x
    coords[:, y_cols] -= pad_y
    coords[:, :10] /= gain

    # Clamp each of the five (x, y) points into the original image.
    for col in x_cols:
        coords[:, col].clamp_(0, img0_shape[1])
    for col in y_cols:
        coords[:, col].clamp_(0, img0_shape[0])

    return coords
117
+
118
def get_face(input_image):
    """Detect faces in a BGR image.

    Returns ``(bboxs, landmarks)``: integer xyxy boxes and 5-point
    landmarks, both mapped back to ``input_image`` pixel coordinates.
    """
    # Detection hyper-parameters.
    size_convert = 128
    conf_thres = 0.4
    iou_thres = 0.5

    # Letterboxed tensor for the detector; keep the original for coord mapping.
    img = resize_image(input_image.copy(), size_convert)

    # Single forward pass through yolov5-face, no gradients needed.
    with torch.no_grad():
        pred = model(img[None, :])[0]

    # Keep the best detections, then project them onto the original image.
    det = non_max_suppression_face(pred, conf_thres, iou_thres)[0]
    net_shape = img.shape[1:]
    bboxs = np.int32(
        scale_coords(net_shape, det[:, :4], input_image.shape).round().cpu().numpy())
    landmarks = np.int32(
        scale_coords_landmarks(net_shape, det[:, 5:15], input_image.shape).round().cpu().numpy())

    return bboxs, landmarks
138
+
139
def get_feature(face_image, training = True):
    """Embed a cropped BGR face into an L2-normalized feature vector.

    When ``training`` is True only the first row of the model output is
    returned; otherwise the full output batch is kept.
    """
    # The embedding model expects RGB; crops arrive in OpenCV BGR order.
    rgb = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
    tensor = face_preprocess(rgb).to(device)

    with torch.no_grad():
        out = model_emb(tensor[None, :])
        emb = out[0].cpu().numpy() if training else out.cpu().numpy()

    # Unit-normalize so a dot product equals cosine similarity.
    return emb / np.linalg.norm(emb)
156
+
157
def read_features(root_fearure_path = "static/feature/face_features.npz"):
    """Load the saved face database from an .npz archive.

    Returns ``(images_name, images_emb)``: the registered names ("arr1")
    and their corresponding embedding matrix ("arr2").
    """
    archive = np.load(root_fearure_path, allow_pickle=True)
    return archive["arr1"], archive["arr2"]
163
+
164
def recognition(face_image, index, threshold=0.35):
    """Identify a face crop against the saved feature database.

    Args:
        face_image: BGR face crop (numpy array), e.g. from ``get_face`` boxes.
        index: position of the face within the current frame (used in the log
            line only).
        threshold: minimum cosine similarity to accept a match; below it the
            face is reported as "UNKNOWN". Defaults to the original 0.35.

    Returns:
        ``(score, caption)``: the best similarity score and the matched name,
        or "UNKNOWN" when the score falls below ``threshold``.

    Side effects:
        Appends each newly recognized name to the module-level
        ``recognized_names`` list and logs (name, date, time) to
        ``csv_filename``; prints one summary line per call.
    """
    global recognized_names  # Use the global list to maintain recognized names

    # Embed the query face and score it against every stored embedding.
    # Both sides are L2-normalized, so the dot product is cosine similarity.
    query_emb = get_feature(face_image, training=False)
    images_names, images_embs = read_features()
    scores = (query_emb @ images_embs.T)[0]

    # `np.argmax` picks the best match (the original called this `id_min`,
    # which was misleading).
    best_idx = np.argmax(scores)
    score = scores[best_idx]
    name = images_names[best_idx]

    # Single threshold decision (the original duplicated the 0.35 check).
    caption = name if score >= threshold else "UNKNOWN"

    # Record each confidently recognized person once per session.
    if score >= threshold and caption not in recognized_names:
        recognized_names.append(caption)

        # Save the recognized face to the CSV file.
        now = datetime.datetime.now()
        date = now.strftime("%Y-%m-%d")
        time = now.strftime("%H:%M:%S")
        with open(csv_filename, 'a', newline='') as file:
            writer = csv.writer(file)
            writer.writerow([caption, date, time])

    print(f"Face {index}: Score: {score:.2f}, Name: {caption}")
    return score, caption
200
+
201
+
202
+
203
def create_csv_file(filename):
    """Create (or overwrite) ``filename`` with the attendance header row."""
    with open(filename, 'w', newline='') as handle:
        csv.writer(handle).writerow(["Name", "Date", "Time"])
207
+
208
# Create the CSV file if it doesn't exist
# Runs once at import time so `recognition` can always append rows to it.
if not os.path.exists(csv_filename):
    create_csv_file(csv_filename)
211
+
212
def recognize_from_images(image_folder):
    """Run face detection + recognition over every image in ``image_folder``."""
    if not os.path.exists(image_folder):
        print(f"Image folder '{image_folder}' doesn't exist.")
        return

    for fname in os.listdir(image_folder):
        # Skip anything that isn't a supported image file.
        if not fname.endswith(("png", 'jpg', 'jpeg')):
            continue

        frame = cv2.imread(os.path.join(image_folder, fname))

        # Detect every face, then identify each crop independently.
        boxes, _ = get_face(frame)
        for idx, (x1, y1, x2, y2) in enumerate(boxes):
            recognition(frame[y1:y2, x1:x2], idx)
229
 
230
  # Function to check login credentials (Dummy function, replace with real logic)
231
  def check_login(username, password):
 
261
  recognition(face_image, i)
262
  st.image(opencv_image, channels="BGR", caption="Processed Image")
263
 
264
+ # Additional logic for face recognition using a laptop's camera
265
+ # This part of the code needs to be adapted based on your specific requirements and setup
266
+
267
  if 'recognized_names' in globals():
268
  # Show Attendance Table
269
  if recognized_names: