# NOTE: This file was scraped from a Hugging Face Spaces page whose status
# banner read "Spaces: Runtime error"; the lines below are the app source.
| import streamlit as st | |
| from PIL import Image | |
| from deepface import DeepFace | |
| import cv2 | |
| import numpy as np | |
# --- Page chrome -------------------------------------------------------------
# Global CSS: black page background with white text.
_PAGE_CSS = """
<style>
body {
    background-color: #000000;
    color: white;
}
</style>
"""

# Center-aligned main heading.
_PAGE_HEADING = """
<h1 style='text-align: center;'>DeepFace - Age, Gender, Race, Expression Detection Program</h1>
"""

# Center-aligned credits paragraph.
_PAGE_CREDITS = """
<p style='text-align: center;'>Application Created by:<br>
Shahzad Ahmed (2K20/IT/114)<br>
Aziz Dil Khan (2K20/IT/31)<br>
Muhammad Ahsan Qureshi (2K20/IT/68)</p>
"""

# Render the three HTML fragments in order (CSS first so it applies globally).
for _fragment in (_PAGE_CSS, _PAGE_HEADING, _PAGE_CREDITS):
    st.markdown(_fragment, unsafe_allow_html=True)
def drawFace(image, results):
    """Annotate ``image`` in place with a box and labels for each detected face.

    ``results`` is the output of ``DeepFace.analyze``: one dict per face,
    carrying the bounding-box ``region`` plus age / gender / emotion / race
    attributes.  Returns the (mutated) image for convenience.
    """
    GREEN = (0, 255, 0)
    for face in results:
        region = face['region']
        left, top = region['x'], region['y']
        width, height = region['w'], region['h']

        # Collapse the gender probability dict into a single letter.
        sex = 'M' if face['gender']['Man'] > face['gender']['Woman'] else 'F'
        label = f"{face['age']}{sex} - {face['dominant_emotion']}"

        # Box around the face, summary label above it, race label below it.
        cv2.rectangle(image, (left, top), (left + width, top + height), GREEN, 2)
        cv2.putText(image, label, (left, top - 10),
                    cv2.FONT_HERSHEY_PLAIN, 2, GREEN, 3)
        cv2.putText(image, face['dominant_race'], (left, top + height + 30),
                    cv2.FONT_HERSHEY_PLAIN, 2, GREEN, 3)
    return image
st.title("DeepFace Analysis")

# Add file upload option.
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Read the uploaded image.
    image = Image.open(uploaded_file)

    # Save the image temporarily so OpenCV / DeepFace can read it from disk.
    # BUG FIX: PNG uploads may be RGBA or palette mode, and Pillow refuses to
    # write those modes as JPEG — convert to RGB before saving.
    img_path = "temp_image.jpg"
    image.convert("RGB").save(img_path)

    try:
        # Analyze the image using DeepFace (first run downloads model weights).
        results = DeepFace.analyze(
            img_path=img_path,
            actions=['age', 'gender', 'race', 'emotion'],
        )
    except ValueError as err:
        # DeepFace raises ValueError when it cannot detect a face; show a
        # friendly message instead of crashing the whole app.
        st.error(f"Could not analyze the image: {err}")
    else:
        # Read the image with OpenCV and convert BGR -> RGB for display.
        cv_image = cv2.imread(img_path)
        cv_image_rgb = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # Simple white-balance in LAB space: pull the a/b chroma channels
        # toward neutral (128), weighted by per-pixel luminance.
        # BUG FIX: do the arithmetic in float32 — the original wrote the
        # adjustment straight back into a uint8 array, so negative
        # intermediates wrapped around and corrupted the colors.
        lab = cv2.cvtColor(cv_image_rgb, cv2.COLOR_RGB2LAB).astype(np.float32)
        avg_a = np.average(lab[:, :, 1])
        avg_b = np.average(lab[:, :, 2])
        luminance = lab[:, :, 0] / 255.0
        lab[:, :, 1] -= (avg_a - 128) * luminance * 1.1
        lab[:, :, 2] -= (avg_b - 128) * luminance * 1.1
        lab = np.clip(lab, 0, 255).astype(np.uint8)
        balanced = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)

        annotated_image = drawFace(balanced, results)

        # Display the annotated image and the raw results side by side.
        col1, col2 = st.columns(2)
        col1.image(annotated_image, caption='Annotated Image', use_column_width=True)
        col2.write("Results:")
        col2.write(results)