Clocksp committed on
Commit
543d9f5
·
verified ·
1 Parent(s): 9250696

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -38
app.py CHANGED
@@ -11,64 +11,54 @@ emotions = ['HAPPY', 'SAD', 'SURPRISED']
11
  with open('model.pkl', 'rb') as f:
12
  model = pickle.load(f)
13
 
14
- def predict_emotion(image, draw_option):
15
- if image is None:
16
- return {"Error": 1.0}, None, "⚠ Please upload an image."
17
-
18
  # Convert PIL image to OpenCV format
19
  img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
20
 
21
  # Extract landmarks
22
- face_landmarks = get_face_landmarks(img, draw=(draw_option == "Show Landmarks"), static_image_mode=True)
23
 
24
  if face_landmarks is None or len(face_landmarks) == 0:
25
  return {"No face detected": 1.0}, None, "⚠ No face detected in the image."
26
 
27
- # Predict emotion
28
  output = model.predict([face_landmarks])
29
  predicted_emotion = emotions[int(output[0])]
30
 
31
- # Confidence scores
32
  if hasattr(model, "predict_proba"):
33
  probs = model.predict_proba([face_landmarks])[0]
34
  confidence_dict = {emotions[i]: float(probs[i]) for i in range(len(emotions))}
35
  else:
36
  confidence_dict = {predicted_emotion: 1.0}
37
 
38
- # Annotate image only if user wants and face is detected
39
- # if draw_option == "Show Landmarks" or draw_option == "Show Emotion":
40
- # if draw_option == "Show Emotion":
41
- # # cv2.putText(img, predicted_emotion, (10, img.shape[0] - 10),
42
- # # cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
43
- # annotated_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
44
- # else:
45
- # annotated_img = None # No annotation if user chooses "None"
46
 
47
- # return confidence_dict, annotated_img, f" Detected emotion: {predicted_emotion}"
48
 
49
  # Example images
50
- examples = [["examples/happy.png"], ["examples/sad.png"], ["examples/surprised.png"]]
51
-
52
- # Custom UI with Radio Toggle
53
- with gr.Blocks(theme="default") as demo:
54
- gr.Markdown("## 🧠 Emotion Detector")
55
- gr.Markdown("Upload an image or use webcam to detect emotions (HAPPY, SAD, SURPRISED).")
56
-
57
- with gr.Row():
58
- with gr.Column():
59
- image_input = gr.Image(type="pil", label="Upload Image or Use Webcam", sources=["upload", "webcam"])
60
- draw_option = gr.Radio(choices=["None", "Show Landmarks", "Show Emotion"], value="Show Emotion",
61
- label="Annotation Mode", interactive=True)
62
- with gr.Column():
63
- label_output = gr.Label(num_top_classes=3, label="Predicted Emotion & Confidence")
64
- image_output = gr.Image(type="numpy", label="Annotated Image")
65
- status_output = gr.Textbox(label="Status", interactive=False)
66
-
67
- # Real-time update with both image and annotation mode
68
- image_input.change(fn=predict_emotion, inputs=[image_input, draw_option], outputs=[label_output, image_output, status_output])
69
- draw_option.change(fn=predict_emotion, inputs=[image_input, draw_option], outputs=[label_output, image_output, status_output])
70
-
71
- gr.Examples(examples=examples, inputs=[image_input])
72
 
73
  if __name__ == "__main__":
74
  demo.launch()
 
11
  with open('model.pkl', 'rb') as f:
12
  model = pickle.load(f)
13
 
14
def predict_emotion(image, show_landmarks):
    """Classify the emotion shown in a face image.

    Parameters
    ----------
    image : PIL.Image.Image | None
        Image from the Gradio upload/webcam component. May be ``None``
        when the component is cleared, so it must be guarded against.
    show_landmarks : bool
        When True, ``get_face_landmarks`` draws the detected landmarks
        onto the working image and the annotated image is returned.

    Returns
    -------
    tuple
        ``(confidence_dict, annotated_image_or_None, status_message)`` —
        matching the Label / Image / Textbox outputs of the interface.
    """
    # Guard: Gradio fires the callback with None when no image is present;
    # without this check np.array(None) -> cv2.cvtColor would crash.
    if image is None:
        return {"Error": 1.0}, None, "⚠ Please upload an image."

    # Convert PIL (RGB) image to OpenCV's BGR format.
    img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    # Extract landmarks; when show_landmarks is True the helper also draws
    # them onto `img` in place (presumably — TODO confirm helper contract).
    face_landmarks = get_face_landmarks(img, draw=show_landmarks, static_image_mode=True)

    if face_landmarks is None or len(face_landmarks) == 0:
        return {"No face detected": 1.0}, None, "⚠ No face detected in the image."

    # Predict emotion and per-class probabilities.
    output = model.predict([face_landmarks])
    predicted_emotion = emotions[int(output[0])]

    if hasattr(model, "predict_proba"):
        probs = model.predict_proba([face_landmarks])[0]
        confidence_dict = {emotions[i]: float(probs[i]) for i in range(len(emotions))}
    else:
        # Model exposes no probabilities: report the single prediction as certain.
        confidence_dict = {predicted_emotion: 1.0}

    # Only show an image when landmarks were requested; convert back to RGB
    # for display, since `img` is in OpenCV BGR order.
    annotated_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) if show_landmarks else None

    return confidence_dict, annotated_img, f" Detected emotion: {predicted_emotion}"
41
 
42
# Example images shown under the interface; each row pre-ticks "Show Landmarks".
examples = [
    ["examples/happy.png", True],
    ["examples/sad.png", True],
    ["examples/surprised.png", True],
]

# Gradio interface: components are built as named lists so the mapping
# between predict_emotion's parameters/returns and the UI stays obvious.
input_components = [
    gr.Image(type="pil", label="Upload Image or Use Webcam", sources=["upload", "webcam"]),
    gr.Checkbox(label="Show Landmarks", value=False),
]
output_components = [
    gr.Label(num_top_classes=3, label="Predicted Emotion & Confidence"),
    gr.Image(type="numpy", label="Landmark Image"),  # populated only when the checkbox is ticked
    gr.Textbox(label="Status", interactive=False),
]

demo = gr.Interface(
    fn=predict_emotion,
    inputs=input_components,
    outputs=output_components,
    title="Emotion Detector",
    description="Upload an image or use webcam to detect emotions (HAPPY, SAD, SURPRISED). Check 'Show Landmarks' to visualize facial landmarks.",
    examples=examples,
    theme="default",
)
 
 
 
62
 
63
# Launch the Gradio app only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    demo.launch()