# Face_Attendance / app.py
# (Hugging Face Space by PrashanthB461 — commit 9127e77 "Update app.py", verified)
import gradio as gr
import cv2
import numpy as np
from PIL import Image
import base64
import requests
import json
import logging
from datetime import datetime
from utils import WorkerRecognitionSystem
import os
from dotenv import load_dotenv
import tempfile
import pandas as pd
import io
import warnings
# Suppress TensorFlow C++ logging ('3' = errors only) and Python-level
# warnings so the console output stays readable.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.filterwarnings('ignore')
# Configure module-level logging with timestamps.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Load environment variables from a local .env file, if present.
load_dotenv()
# API tokens with placeholder fallbacks. Neither token is referenced
# elsewhere in this file — presumably consumed by utils.WorkerRecognitionSystem
# or planned integrations; TODO confirm they are actually needed here.
SALESFORCE_TOKEN = os.getenv("SALESFORCE_TOKEN", "your_salesforce_oauth_token")
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN", "your_hugging_face_api_token")
# Initialize the recognition system at import time and fail fast (with a
# logged error) if the model or worker database cannot be loaded.
try:
    system = WorkerRecognitionSystem()
    system.load_worker_database()
    logger.info("System initialized successfully!")
except Exception as e:
    logger.error(f"System initialization failed: {str(e)}")
    raise RuntimeError(f"System initialization failed: {str(e)}")
def process_image(input_image):
    """Run worker face recognition on a single image.

    Args:
        input_image: A PIL.Image.Image (any mode) or an array-like accepted
            by np.array.

    Returns:
        tuple[str, list | None]: A newline-joined summary line per detected
        worker (or "No faces detected" / an error message), and a list of
        (image_array, caption) pairs for the gallery, or None when there is
        nothing to show.
    """
    try:
        # Normalize PIL uploads to RGB first: COLOR_RGB2BGR expects a
        # 3-channel array and would fail on RGBA/palette/grayscale images.
        if isinstance(input_image, Image.Image):
            input_image = input_image.convert("RGB")
        frame = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2BGR)
        frame = system.preprocess_frame(frame)
        results = system.process_frame(frame)
        output_images = []
        output_text = []
        for result in results:
            worker_id = result.get("worker_id", "Unknown")
            confidence = result.get("confidence", 0)
            # NOTE(review): assumes entry_time, when present, is a datetime;
            # a string value would raise on strftime — confirm in utils.
            entry_time = result.get("entry_time", datetime.now()).strftime("%Y-%m-%d %H:%M:%S")
            # Only results that carry a cropped face image go to the gallery.
            if result.get("image_base64", ""):
                img_data = base64.b64decode(result["image_base64"])
                img = Image.open(io.BytesIO(img_data))
                output_images.append((np.array(img), f"Worker {worker_id} - Confidence: {confidence:.2%}"))
            output_text.append(f"Worker ID: {worker_id}, Confidence: {confidence:.2%}, Time: {entry_time}")
        return "\n".join(output_text) if output_text else "No faces detected", output_images or None
    except Exception as e:
        logger.error(f"Image processing error: {str(e)}")
        return f"Error: {str(e)}", None
def process_input(file_input):
    """Dispatch an uploaded file to image or video processing.

    Args:
        file_input: File-like object from gr.File exposing .name and .read().
            NOTE(review): newer Gradio versions hand gr.File callbacks a
            filepath string instead — confirm against the pinned Gradio
            version, since a str has no .read().

    Returns:
        tuple[str, list | None]: Result text and gallery images (or None).
    """
    if not file_input:
        return "Please upload a file", None
    tmp_path = None
    try:
        # Persist the upload to a real temp file so both PIL and the video
        # pipeline can open it by path; keep the original extension.
        suffix = os.path.splitext(file_input.name)[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
            tmp.write(file_input.read())
            tmp_path = tmp.name
        # Try as an image first. Image.open is lazy, so force the decode
        # with load() inside the context manager: decode failures are
        # caught here (not later), and the file handle is always closed.
        try:
            with Image.open(tmp_path) as img:
                img.load()
                return process_image(img)
        except (OSError, ValueError):
            # Not a decodable image — fall back to video processing.
            # NOTE(review): process_video is not defined in this file;
            # confirm it exists elsewhere or video uploads will raise.
            return process_video(tmp_path)
    except Exception as e:
        logger.error(f"Input processing failed: {str(e)}")
        return f"Processing failed: {str(e)}", None
    finally:
        # Always remove the temp file, even on error paths.
        if tmp_path and os.path.exists(tmp_path):
            os.unlink(tmp_path)
# Gradio Interface: a single-file upload wired to process_input, with a
# text box for per-worker results and a gallery for detected face crops.
with gr.Blocks(title="Worker Recognition System") as demo:
    gr.Markdown("# Worker Recognition System")
    gr.Markdown("Upload an image or video to recognize workers at a construction site.")
    with gr.Row():
        file_input = gr.File(label="Upload Image/Video", file_types=["image", "video"])
    with gr.Row():
        text_output = gr.Textbox(label="Recognition Results", interactive=False)
        gallery_output = gr.Gallery(label="Detected Faces")
    # Re-run recognition whenever the uploaded file changes; process_input
    # returns (text, images) matching the two outputs below.
    file_input.change(
        fn=process_input,
        inputs=file_input,
        outputs=[text_output, gallery_output]
    )
# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()