# face-attendance / streamlit_app.py
# Streamlit app: YOLOv5-face detection + InsightFace embeddings for attendance logging.
import streamlit as st
import pandas as pd
import cv2
import base64
import numpy as np
import datetime
import csv
import torch
from torchvision import transforms
import sys
import os
#pytorch
from concurrent.futures import thread
from sqlalchemy import null
import torch
from torchvision import transforms
import time
from threading import Thread
#other lib
import sys
import numpy as np
import os
import cv2
import csv
import datetime
sys.path.insert(0, "yolov5_face")
from models.experimental import attempt_load
from utils.datasets import letterbox
from utils.general import check_img_size, non_max_suppression_face, scale_coords

# Run everything on CPU (no CUDA assumed in this deployment).
device = torch.device("cpu")

# Get model detect
## Case 1 (smaller/faster):
# model = attempt_load("yolov5_face/yolov5s-face.pt", map_location=device)
## Case 2 (default, more accurate):
model = attempt_load("yolov5_face/yolov5m-face.pt", map_location=device)

# Get model recognition
## Case 1 (default): iResNet-100 backbone from InsightFace.
from insightface.insight_face import iresnet100
weight = torch.load("insightface/resnet100_backbone.pth", map_location=device)
model_emb = iresnet100()
## Case 2 (lighter alternative):
# from insightface.insight_face import iresnet18
# weight = torch.load("insightface/resnet18_backbone.pth", map_location=device)
# model_emb = iresnet18()
model_emb.load_state_dict(weight)
model_emb.to(device)
model_emb.eval()

detected_faces = []

# Recognizer preprocessing: tensor in [0,1], resized to 112x112,
# then normalized per channel to roughly [-1, 1].
face_preprocess = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize((112, 112)),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])

isThread = True
score = 0
name = None  # fixed: was SQLAlchemy's `null`; plain None is the correct Python sentinel

csv_filename = "recognized_faces.csv"
recognized_names = []  # names already logged this session (avoids duplicate CSV rows)
# Resize image
def resize_image(img0, img_size):
    """Shrink an image to fit img_size, letterbox it, and return a CHW float tensor in [0, 1]."""
    orig_h, orig_w = img0.shape[:2]
    ratio = img_size / max(orig_h, orig_w)
    if ratio != 1:
        # Shrinking favours INTER_AREA; enlarging favours INTER_LINEAR.
        method = cv2.INTER_AREA if ratio < 1 else cv2.INTER_LINEAR
        img0 = cv2.resize(img0, (int(orig_w * ratio), int(orig_h * ratio)), interpolation=method)

    # Pad to the stride-aligned square the detector expects.
    target = check_img_size(img_size, s=model.stride.max())
    padded = letterbox(img0, new_shape=target)[0]

    # HWC BGR uint8 -> CHW RGB float tensor scaled to [0, 1].
    chw = padded[:, :, ::-1].transpose(2, 0, 1).copy()
    tensor = torch.from_numpy(chw).to(device)
    tensor = tensor.float()
    tensor /= 255.0
    return tensor
def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
    """Map 5-point landmark coords (x1,y1 .. x5,y5) from the letterboxed image back to the original.

    Mutates `coords` in place and returns it.
    """
    if ratio_pad is None:
        # Recover the letterbox gain and padding from the two shapes (gain = old / new).
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])
        pad = ((img1_shape[1] - img0_shape[1] * gain) / 2,
               (img1_shape[0] - img0_shape[0] * gain) / 2)
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    # Undo padding, then scaling: even columns are x's, odd columns are y's.
    coords[:, [0, 2, 4, 6, 8]] -= pad[0]
    coords[:, [1, 3, 5, 7, 9]] -= pad[1]
    coords[:, :10] /= gain

    # Clamp every landmark inside the original image bounds.
    for col in range(10):
        bound = img0_shape[1] if col % 2 == 0 else img0_shape[0]
        coords[:, col].clamp_(0, bound)
    return coords
def get_face(input_image):
    """Detect faces in a BGR image; return int32 bboxes (x1,y1,x2,y2) and 5-point landmarks."""
    # Detection parameters.
    target_size = 128
    conf_threshold = 0.4
    iou_threshold = 0.5

    # Letterbox-resize a copy for the detector.
    img = resize_image(input_image.copy(), target_size)

    # Forward pass through YOLOv5-face (inference only).
    with torch.no_grad():
        pred = model(img[None, :])[0]

    # Keep the best non-overlapping detections.
    det = non_max_suppression_face(pred, conf_threshold, iou_threshold)[0]

    # Map boxes and landmarks back to the original image resolution.
    bboxs = np.int32(scale_coords(img.shape[1:], det[:, :4], input_image.shape).round().cpu().numpy())
    landmarks = np.int32(scale_coords_landmarks(img.shape[1:], det[:, 5:15], input_image.shape).round().cpu().numpy())
    return bboxs, landmarks
def get_feature(face_image, training=True):
    """Embed a BGR face crop with the recognizer and L2-normalize the result.

    With training=True the single embedding vector is returned (batch axis
    stripped); otherwise the batch axis is kept.
    """
    # The recognizer expects RGB input.
    rgb = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)

    # Resize/normalize and move to the inference device.
    tensor = face_preprocess(rgb).to(device)

    # Inference only — no gradients needed.
    with torch.no_grad():
        batch = model_emb(tensor[None, :])
        emb_img_face = batch[0].cpu().numpy() if training else batch.cpu().numpy()

    # Unit-length embedding so dot products act as cosine similarities.
    return emb_img_face / np.linalg.norm(emb_img_face)
def read_features(root_fearure_path = "static/feature/face_features.npz"):
    """Load the gallery of known faces: (names array, embeddings array) from an .npz archive.

    NOTE: the parameter name keeps the original's typo for caller compatibility.
    """
    archive = np.load(root_fearure_path, allow_pickle=True)
    return archive["arr1"], archive["arr2"]
def recognition(face_image, index):
    """Match a face crop against the saved gallery and log first-time matches to CSV.

    Prints the result and returns (score, caption), where caption is the
    matched name or "UNKNOWN" when the best similarity is below threshold.
    """
    global recognized_names  # session-wide list of names already logged

    # Embed the query face (batch axis kept so the matrix product below works).
    query_emb = get_feature(face_image, training=False)

    # Cosine similarity against every known embedding (all vectors are unit length).
    images_names, images_embs = read_features()
    scores = (query_emb @ images_embs.T)[0]

    best = np.argmax(scores)  # index of the most similar gallery face
    score = scores[best]
    name = images_names[best]

    # Single threshold — the original duplicated 0.35 in two separate checks.
    threshold = 0.35
    if score < threshold:
        caption = "UNKNOWN"
    else:
        caption = name
        # Log each person at most once per session.
        if caption not in recognized_names:
            recognized_names.append(caption)
            # Append one "name, date, time" row to the attendance CSV.
            # (Local no longer shadows the imported `time` module.)
            now = datetime.datetime.now()
            with open(csv_filename, 'a', newline='') as file:
                writer = csv.writer(file)
                writer.writerow([caption, now.strftime("%Y-%m-%d"), now.strftime("%H:%M:%S")])

    print(f"Face {index}: Score: {score:.2f}, Name: {caption}")
    return score, caption
def create_csv_file(filename):
    """Create a fresh attendance CSV containing only the header row."""
    with open(filename, 'w', newline='') as out:
        csv.writer(out).writerow(["Name", "Date", "Time"])
# One-time bootstrap: create the attendance CSV (header only) if it doesn't
# exist yet, so later appends in `recognition` always have a file to write to.
if not os.path.exists(csv_filename):
    create_csv_file(csv_filename)
def recognize_from_images(image_folder):
    """Run face recognition over every image file in `image_folder`.

    Skips non-image filenames and files OpenCV cannot decode.
    """
    if not os.path.exists(image_folder):
        print(f"Image folder '{image_folder}' doesn't exist.")
        return

    for image_name in os.listdir(image_folder):
        # Match real image extensions case-insensitively: the old check
        # `endswith(("png", ...))` also matched names like "foopng" and
        # missed upper-case extensions such as ".PNG".
        if not image_name.lower().endswith((".png", ".jpg", ".jpeg")):
            continue
        image_path = os.path.join(image_folder, image_name)
        input_image = cv2.imread(image_path)
        if input_image is None:  # unreadable/corrupt file — skip instead of crashing
            continue

        # Detect faces, then recognize each crop.
        bboxs, _ = get_face(input_image)
        for i, (x1, y1, x2, y2) in enumerate(bboxs):
            face_image = input_image[y1:y2, x1:x2]
            recognition(face_image, i)
def check_login(username, password):
    """Dummy credential check (replace with real auth): accepts only admin/admin."""
    expected = ("admin", "admin")
    return (username, password) == expected
def get_table_download_link(df):
    """Return an HTML anchor that downloads `df` as attendance.csv via a base64 data URI."""
    csv_text = df.to_csv(index=False)  # local renamed: old name shadowed the csv module
    encoded = base64.b64encode(csv_text.encode()).decode()
    return (
        f'<a href="data:file/csv;base64,{encoded}" '
        f'download="attendance.csv">Download CSV</a>'
    )
def main():
    """Streamlit entry point: login gate, image upload, recognition, attendance table."""
    st.title("Face Recognition and Attendance System")

    # Track login state across Streamlit reruns.
    if 'login_status' not in st.session_state:
        st.session_state['login_status'] = False

    # Guard clause: show the login page and stop until authenticated.
    if not st.session_state['login_status']:
        username = st.text_input("Username")
        password = st.text_input("Password", type="password")
        if st.button("Login"):
            if check_login(username, password):
                st.session_state['login_status'] = True
            else:
                st.error("Incorrect Username/Password")
        return

    # Main interface (only reachable when logged in).
    uploaded_file = st.file_uploader("Upload an image for face recognition", type=["jpg", "jpeg", "png"])
    if uploaded_file is not None:
        # Decode the uploaded bytes into a BGR OpenCV image.
        file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
        opencv_image = cv2.imdecode(file_bytes, 1)
        if st.button("Recognize Faces"):
            # Detect and recognize every face in the uploaded image.
            bboxs, _ = get_face(opencv_image)
            for i, (x1, y1, x2, y2) in enumerate(bboxs):
                face_image = opencv_image[y1:y2, x1:x2]
                recognition(face_image, i)
            st.image(opencv_image, channels="BGR", caption="Processed Image")

    # Attendance table plus CSV download link, shown once anyone is recognized.
    if 'recognized_names' in globals():
        if recognized_names:
            table = pd.DataFrame(recognized_names, columns=["Name"])
            st.table(table)
            st.markdown(get_table_download_link(table), unsafe_allow_html=True)
# Script entry point: launch the Streamlit app logic when run directly.
if __name__ == "__main__":
    main()