ShahzadAhmed committed on
Commit
e13efbe
·
1 Parent(s): 0654e2c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +94 -0
app.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from PIL import Image
3
+ from deepface import DeepFace
4
+ import cv2
5
+ import numpy as np
# ---------------------------------------------------------------------------
# Page setup: inject CSS and render the static header/credits.
# NOTE(review): styling the raw <body> tag via st.markdown is fragile — newer
# Streamlit versions wrap the app in their own containers, so this may have no
# visible effect there; confirm against the deployed Streamlit version.
# ---------------------------------------------------------------------------

# Set background color to black
st.markdown(
    """
    <style>
    body {
        background-color: #000000;
        color: white;
    }
    </style>
    """,
    unsafe_allow_html=True
)

# Center-align the heading
st.markdown(
    """
    <h1 style='text-align: center;'>DeepFace - Age, Gender, Race, Expression Detection Program</h1>
    """,
    unsafe_allow_html=True
)

# Add the description (author credits)
st.markdown(
    """
    <p style='text-align: center;'>Application Created by:<br>
    Shahzad Ahmed (2K20/IT/114)<br>
    Aziz Dil Khan (2K20/IT/31)<br>
    Muhammad Ahsan Qureshi (2K20/IT/68)</p>
    """,
    unsafe_allow_html=True
)

def drawFace(image, results):
    """Draw face annotations from DeepFace analysis results onto *image*.

    For each detected face, draws a green bounding box, an "<age><M/F> -
    <emotion>" label above the box, and the dominant race below it. The
    image array is modified in place and also returned for convenience.

    Args:
        image: BGR/RGB uint8 image array (OpenCV-compatible).
        results: iterable of DeepFace.analyze() result dicts, each containing
            'region', 'age', 'gender', 'dominant_emotion', 'dominant_race'.

    Returns:
        The same image array, annotated.
    """
    for face in results:
        region = face['region']
        x, y = region['x'], region['y']
        w, h = region['w'], region['h']

        # Gender scores are per-class confidences; pick the larger one.
        scores = face['gender']
        sex = 'M' if scores['Man'] > scores['Woman'] else 'F'
        label = f"{face['age']}{sex} - {face['dominant_emotion']}"

        top_left, bottom_right = (x, y), (x + w, y + h)
        cv2.rectangle(image, top_left, bottom_right, (0, 255, 0), 2)
        # Label above the box, dominant race just below it.
        cv2.putText(image, label, (x, y - 10), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 3)
        cv2.putText(image, face['dominant_race'], (x, y + h + 30), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 3)

    return image
st.title("DeepFace Analysis")

# Add file upload option
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Read the uploaded image and persist it to disk so both DeepFace and
    # OpenCV can load it by path.
    image = Image.open(uploaded_file)
    img_path = "temp_image.jpg"  # temporary on-disk copy

    # BUGFIX: PNG uploads may be RGBA/palette mode, and JPEG cannot store an
    # alpha channel — Image.save() would raise OSError. Convert to RGB first.
    image.convert("RGB").save(img_path)

    try:
        # Analyze the image using DeepFace (downloads model weights on first run).
        results = DeepFace.analyze(img_path=img_path, actions=['age', 'gender', 'race', 'emotion'])
    except ValueError:
        # DeepFace raises ValueError when it cannot detect a face; report it
        # instead of crashing the app.
        st.error("No face could be detected in the uploaded image. Please try another image.")
    else:
        # Read the image back with OpenCV and convert BGR -> RGB for display.
        cv_image = cv2.imread(img_path)
        cv_image_rgb = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # Gray-world-style color balance in LAB space: pull the a/b chroma
        # channels toward neutral (128), weighted by luminance (L / 255).
        # BUGFIX: do the arithmetic in float and clip before casting back —
        # the original assigned float results into uint8 slices, which wraps
        # around on negative/overflowing values and corrupts colors.
        lab = cv2.cvtColor(cv_image_rgb, cv2.COLOR_RGB2LAB).astype(np.float32)
        avg_a = np.average(lab[:, :, 1])
        avg_b = np.average(lab[:, :, 2])
        luminance = lab[:, :, 0] / 255.0
        lab[:, :, 1] -= (avg_a - 128) * luminance * 1.1
        lab[:, :, 2] -= (avg_b - 128) * luminance * 1.1
        balanced = cv2.cvtColor(np.clip(lab, 0, 255).astype(np.uint8), cv2.COLOR_LAB2RGB)

        # Draw bounding boxes and labels for every detected face.
        annotated_image = drawFace(balanced, results)

        # Display image and analysis results side by side.
        col1, col2 = st.columns(2)
        col1.image(annotated_image, caption='Annotated Image', use_column_width=True)
        col2.write("Results:")
        col2.write(results)