clementBE committed on
Commit
0ef5858
·
verified ·
1 Parent(s): 866cbd6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -66
app.py CHANGED
@@ -1,4 +1,4 @@
1
- import os, zipfile, tempfile, requests, io
2
  import numpy as np
3
  import pandas as pd
4
  from PIL import Image
@@ -8,10 +8,9 @@ from torchvision import transforms
8
  from torchvision.models import resnet50, ResNet50_Weights
9
  from sklearn.cluster import MiniBatchKMeans
10
  import matplotlib.pyplot as plt
11
- from datetime import datetime
12
  import gradio as gr
13
- from deepface import DeepFace
14
  import cv2
 
15
 
16
  # ---------------------------
17
  # Device
@@ -61,22 +60,54 @@ def get_dominant_color(image,num_colors=5):
61
  return dominant_color, hex_color
62
 
63
  # ---------------------------
64
- # Gender normalization
65
  # ---------------------------
66
- def normalize_gender(raw_gender):
67
- if raw_gender is None:
68
- return "Inconnu"
69
- g = str(raw_gender).lower()
70
- if g in ["man", "male", "m"]:
71
- return "Homme"
72
- elif g in ["woman", "female", "f"]:
73
- return "Femme"
74
- elif g in ["homme"]:
75
- return "Homme"
76
- elif g in ["femme"]:
77
- return "Femme"
78
- else:
79
- return "Inconnu"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
 
81
  # ---------------------------
82
  # Core analysis
@@ -85,7 +116,6 @@ def classify_zip_and_analyze_color(zip_file):
85
  results = []
86
  images_list = []
87
  zip_name = os.path.splitext(os.path.basename(zip_file.name))[0]
88
- date_str = datetime.now().strftime("%Y%m%d")
89
 
90
  with tempfile.TemporaryDirectory() as tmpdir:
91
  with zipfile.ZipFile(zip_file.name,'r') as zip_ref:
@@ -113,32 +143,9 @@ def classify_zip_and_analyze_color(zip_file):
113
  rgb, hex_color = get_dominant_color(image)
114
  basic_color = closest_basic_color(rgb)
115
 
116
- # Face analysis
117
- faces_data = []
118
- try:
119
- img_cv2 = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
120
- detected_faces = DeepFace.analyze(
121
- img_cv2, actions=["age","gender","emotion"], enforce_detection=False
122
- )
123
-
124
- # Ensure we have a list
125
- if isinstance(detected_faces, dict):
126
- detected_faces = [detected_faces]
127
-
128
- for f in detected_faces:
129
- gender_fr = normalize_gender(f.get("gender") or f.get("dominant_gender"))
130
- faces_data.append({
131
- "age": f.get("age", -1),
132
- "gender": gender_fr,
133
- "emotion": f.get("dominant_emotion", "Unknown")
134
- })
135
- except:
136
- faces_data = []
137
-
138
- faces_str = "; ".join([
139
- f"Age: {face['age']}, Gender: {face['gender']}, Emotion: {face['emotion']}"
140
- for face in faces_data
141
- ])
142
 
143
  results.append((
144
  fname,
@@ -149,22 +156,19 @@ def classify_zip_and_analyze_color(zip_file):
149
  faces_str
150
  ))
151
 
152
- # Create DataFrame
153
  df = pd.DataFrame(results, columns=["Filename","Top 3 Predictions","Confidence","Dominant Color","Basic Color","Face Info"])
154
- out_xlsx = os.path.join(tempfile.gettempdir(), f"{zip_name}_{date_str}_results.xlsx")
155
  df.to_excel(out_xlsx,index=False)
156
 
157
  # ---------------------------
158
  # Plots
159
  # ---------------------------
160
- # Color frequency
161
  fig1, ax1 = plt.subplots()
162
  color_counts = df["Basic Color"].value_counts()
163
  ax1.bar(color_counts.index, color_counts.values, color="skyblue")
164
  ax1.set_title("Basic Color Frequency"); ax1.set_ylabel("Count")
165
  buf1 = io.BytesIO(); plt.savefig(buf1, format="png"); plt.close(fig1); buf1.seek(0); plot1_img = Image.open(buf1)
166
 
167
- # Top prediction distribution
168
  fig2, ax2 = plt.subplots()
169
  preds_flat = []
170
  for p in df["Top 3 Predictions"]: preds_flat.extend(p.split(", "))
@@ -173,26 +177,14 @@ def classify_zip_and_analyze_color(zip_file):
173
  ax2.set_title("Top Prediction Distribution"); ax2.set_xlabel("Count")
174
  buf2 = io.BytesIO(); plt.savefig(buf2, format="png", bbox_inches="tight"); plt.close(fig2); buf2.seek(0); plot2_img = Image.open(buf2)
175
 
176
- # Gender and age
177
- ages_male = [int(f.split(", ")[0].split(": ")[1]) for row in df["Face Info"] for f in row.split("; ") if "Homme" in f]
178
- ages_female = [int(f.split(", ")[0].split(": ")[1]) for row in df["Face Info"] for f in row.split("; ") if "Femme" in f]
179
-
180
- gender_counts = {"Homme": len(ages_male), "Femme": len(ages_female)}
181
-
182
  # Gender distribution
 
183
  fig3, ax3 = plt.subplots()
184
- ax3.bar(gender_counts.keys(), gender_counts.values(), color=["lightblue","pink"])
185
  ax3.set_title("Gender Distribution"); ax3.set_ylabel("Count")
186
  buf3 = io.BytesIO(); plt.savefig(buf3, format="png"); plt.close(fig3); buf3.seek(0); plot3_img = Image.open(buf3)
187
 
188
- # Age distribution
189
- fig4, ax4 = plt.subplots()
190
- bins = range(0,101,5)
191
- ax4.hist([ages_male, ages_female], bins=bins, color=["lightblue","pink"], label=["Homme","Femme"], edgecolor="black")
192
- ax4.set_title("Age Distribution by Gender"); ax4.set_xlabel("Age"); ax4.set_ylabel("Count"); ax4.legend()
193
- buf4 = io.BytesIO(); plt.savefig(buf4, format="png"); plt.close(fig4); buf4.seek(0); plot4_img = Image.open(buf4)
194
-
195
- return df, images_list, out_xlsx, plot1_img, plot2_img, plot3_img, plot4_img
196
 
197
  # ---------------------------
198
  # Gradio interface
@@ -208,12 +200,11 @@ with gr.Blocks() as demo:
208
  plot1 = gr.Image(label="Basic Color Frequency")
209
  plot2 = gr.Image(label="Top Prediction Distribution")
210
  plot3 = gr.Image(label="Gender Distribution")
211
- plot4 = gr.Image(label="Age Distribution by Gender")
212
 
213
  analyze_btn.click(
214
  classify_zip_and_analyze_color,
215
  inputs=uploaded_zip,
216
- outputs=[output_df, image_gallery, download_file, plot1, plot2, plot3, plot4]
217
  )
218
 
219
  demo.launch(server_name="0.0.0.0", server_port=7860)
 
1
+ import os, zipfile, tempfile, io
2
  import numpy as np
3
  import pandas as pd
4
  from PIL import Image
 
8
  from torchvision.models import resnet50, ResNet50_Weights
9
  from sklearn.cluster import MiniBatchKMeans
10
  import matplotlib.pyplot as plt
 
11
  import gradio as gr
 
12
  import cv2
13
+ import requests
14
 
15
  # ---------------------------
16
  # Device
 
60
  return dominant_color, hex_color
61
 
62
  # ---------------------------
63
+ # OpenCV DNN Face + Gender
64
  # ---------------------------
65
+ os.makedirs("models", exist_ok=True)
66
+
67
+ # Face detection model
68
+ FACE_PROTO = "models/deploy.prototxt"
69
+ FACE_MODEL = "models/res10_300x300_ssd_iter_140000_fp16.caffemodel"
70
+ if not os.path.exists(FACE_PROTO):
71
+ r = requests.get("https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt"); open(FACE_PROTO,"wb").write(r.content)
72
+ if not os.path.exists(FACE_MODEL):
73
+ r = requests.get("https://raw.githubusercontent.com/opencv/opencv_3rdparty/master/res10_300x300_ssd_iter_140000_fp16.caffemodel"); open(FACE_MODEL,"wb").write(r.content)
74
+
75
+ # Gender model
76
+ GENDER_PROTO = "models/deploy_gender.prototxt"
77
+ GENDER_MODEL = "models/gender_net.caffemodel"
78
+ if not os.path.exists(GENDER_PROTO):
79
+ r = requests.get("https://raw.githubusercontent.com/spmallick/learnopencv/master/AgeGender/deploy_gender.prototxt"); open(GENDER_PROTO,"wb").write(r.content)
80
+ if not os.path.exists(GENDER_MODEL):
81
+ r = requests.get("https://raw.githubusercontent.com/spmallick/learnopencv/master/AgeGender/gender_net.caffemodel"); open(GENDER_MODEL,"wb").write(r.content)
82
+
83
+ face_net = cv2.dnn.readNet(FACE_MODEL, FACE_PROTO)
84
+ gender_net = cv2.dnn.readNet(GENDER_MODEL, GENDER_PROTO)
85
+ GENDER_LIST = ["Homme","Femme"]
86
+
87
+ def detect_faces_and_gender(image):
88
+ img = np.array(image)[:, :, ::-1] # PIL RGB -> BGR
89
+ h, w = img.shape[:2]
90
+ blob = cv2.dnn.blobFromImage(img, 1.0, (300,300), [104,117,123], swapRB=False)
91
+ face_net.setInput(blob)
92
+ detections = face_net.forward()
93
+ faces_data = []
94
+
95
+ for i in range(detections.shape[2]):
96
+ confidence = detections[0,0,i,2]
97
+ if confidence > 0.5:
98
+ box = detections[0,0,i,3:7] * np.array([w,h,w,h])
99
+ x1,y1,x2,y2 = box.astype(int)
100
+ x1,y1,x2,y2 = max(0,x1), max(0,y1), min(w,x2), min(h,y2)
101
+ face_img = img[y1:y2, x1:x2]
102
+ if face_img.size == 0:
103
+ continue
104
+ face_blob = cv2.dnn.blobFromImage(face_img, 1.0, (227,227),
105
+ [78.4263377603, 87.7689143744, 114.895847746], swapRB=False)
106
+ gender_net.setInput(face_blob)
107
+ gender_preds = gender_net.forward()
108
+ gender = GENDER_LIST[gender_preds[0].argmax()]
109
+ faces_data.append({"bbox":(x1,y1,x2,y2),"gender":gender})
110
+ return faces_data
111
 
112
  # ---------------------------
113
  # Core analysis
 
116
  results = []
117
  images_list = []
118
  zip_name = os.path.splitext(os.path.basename(zip_file.name))[0]
 
119
 
120
  with tempfile.TemporaryDirectory() as tmpdir:
121
  with zipfile.ZipFile(zip_file.name,'r') as zip_ref:
 
143
  rgb, hex_color = get_dominant_color(image)
144
  basic_color = closest_basic_color(rgb)
145
 
146
+ # Face + gender detection
147
+ faces_data = detect_faces_and_gender(image)
148
+ faces_str = "; ".join([f"Gender: {f['gender']}" for f in faces_data])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
 
150
  results.append((
151
  fname,
 
156
  faces_str
157
  ))
158
 
 
159
  df = pd.DataFrame(results, columns=["Filename","Top 3 Predictions","Confidence","Dominant Color","Basic Color","Face Info"])
160
+ out_xlsx = os.path.join(tempfile.gettempdir(), f"{zip_name}_results.xlsx")
161
  df.to_excel(out_xlsx,index=False)
162
 
163
  # ---------------------------
164
  # Plots
165
  # ---------------------------
 
166
  fig1, ax1 = plt.subplots()
167
  color_counts = df["Basic Color"].value_counts()
168
  ax1.bar(color_counts.index, color_counts.values, color="skyblue")
169
  ax1.set_title("Basic Color Frequency"); ax1.set_ylabel("Count")
170
  buf1 = io.BytesIO(); plt.savefig(buf1, format="png"); plt.close(fig1); buf1.seek(0); plot1_img = Image.open(buf1)
171
 
 
172
  fig2, ax2 = plt.subplots()
173
  preds_flat = []
174
  for p in df["Top 3 Predictions"]: preds_flat.extend(p.split(", "))
 
177
  ax2.set_title("Top Prediction Distribution"); ax2.set_xlabel("Count")
178
  buf2 = io.BytesIO(); plt.savefig(buf2, format="png", bbox_inches="tight"); plt.close(fig2); buf2.seek(0); plot2_img = Image.open(buf2)
179
 
 
 
 
 
 
 
180
  # Gender distribution
181
+ gender_counts = [df["Face Info"].str.count("Homme").sum(), df["Face Info"].str.count("Femme").sum()]
182
  fig3, ax3 = plt.subplots()
183
+ ax3.bar(["Homme","Femme"], gender_counts, color=["lightblue","pink"])
184
  ax3.set_title("Gender Distribution"); ax3.set_ylabel("Count")
185
  buf3 = io.BytesIO(); plt.savefig(buf3, format="png"); plt.close(fig3); buf3.seek(0); plot3_img = Image.open(buf3)
186
 
187
+ return df, images_list, out_xlsx, plot1_img, plot2_img, plot3_img
 
 
 
 
 
 
 
188
 
189
  # ---------------------------
190
  # Gradio interface
 
200
  plot1 = gr.Image(label="Basic Color Frequency")
201
  plot2 = gr.Image(label="Top Prediction Distribution")
202
  plot3 = gr.Image(label="Gender Distribution")
 
203
 
204
  analyze_btn.click(
205
  classify_zip_and_analyze_color,
206
  inputs=uploaded_zip,
207
+ outputs=[output_df, image_gallery, download_file, plot1, plot2, plot3]
208
  )
209
 
210
  demo.launch(server_name="0.0.0.0", server_port=7860)