Clocksp committed on
Commit
7cea7e8
·
verified ·
1 Parent(s): 39890a7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -65
app.py CHANGED
@@ -4,13 +4,12 @@ import numpy as np
4
  import pickle
5
  from functools import lru_cache
6
 
7
- # ---- Import your landmarks util ----
8
  try:
9
  from util import get_face_landmarks
10
  except Exception as e:
11
  raise ImportError(
12
  "Could not import 'get_face_landmarks' from util.py. "
13
- "Make sure util.py exists and defines get_face_landmarks(img, draw: bool, static_image_mode: bool)."
14
  ) from e
15
 
16
 
@@ -38,75 +37,43 @@ def predict_emotion(image, draw_toggle):
38
  image: PIL.Image (from gr.Image with type='pil')
39
  draw_toggle: 'OFF' or 'ON'
40
  """
41
- # Input validation
42
  if image is None:
43
  return {"Status": 1.0}, None, "Please upload an image."
44
 
45
  draw = (draw_toggle == "ON")
46
 
47
  # Convert PIL -> OpenCV BGR
48
- try:
49
- img_rgb = np.array(image) # PIL -> RGB ndarray
50
- if img_rgb.ndim == 2: # grayscale to 3-ch
51
- img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_GRAY2RGB)
52
- img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
53
- except Exception:
54
- return {"Status": 1.0}, None, "⚠ Could not read the image. Please try a different one."
55
-
56
- # Extract landmarks (your util may also draw on img internally when draw=True)
57
- try:
58
- landmarks = get_face_landmarks(img_bgr, draw=draw, static_image_mode=True)
59
- except Exception as e:
60
- return {"Status": 1.0}, img_rgb, f"⚠ Landmark extraction failed: {e}"
61
-
62
- # Handle no-face case (do NOT annotate; return original image)
63
  if landmarks is None or (hasattr(landmarks, "__len__") and len(landmarks) == 0):
64
- return {"No face detected": 1.0}, img_rgb, "No face detected in the image."
65
 
66
  # Load model
67
- try:
68
- model = load_model()
69
- except FileNotFoundError:
70
- return {"Status": 1.0}, img_rgb, "⚠ model.pkl not found in repo root."
71
- except Exception as e:
72
- return {"Status": 1.0}, img_rgb, f"⚠ Failed to load model: {e}"
73
 
74
  # Predict
75
- try:
76
- output = model.predict([landmarks])
77
- pred_idx = int(output[0])
78
- pred_label = EMOTIONS[pred_idx] if 0 <= pred_idx < len(EMOTIONS) else str(pred_idx)
79
-
80
- # Confidence/probabilities if available
81
- if hasattr(model, "predict_proba"):
82
- probs = model.predict_proba([landmarks])[0]
83
- confidence = {EMOTIONS[i]: float(probs[i]) for i in range(len(EMOTIONS))}
84
- else:
85
- confidence = {pred_label: 1.0}
86
-
87
- # Always draw predicted text on the copy we return (but ONLY if a face exists)
88
- img_annot = img_bgr.copy()
89
- try:
90
- cv2.putText(
91
- img_annot,
92
- pred_label,
93
- (10, img_annot.shape[0] - 10),
94
- cv2.FONT_HERSHEY_SIMPLEX,
95
- 1.0,
96
- (0, 255, 0),
97
- 2,
98
- cv2.LINE_AA,
99
- )
100
- except Exception:
101
- # If drawing fails, just return the original image
102
- img_annot = img_bgr
103
 
104
- img_out = cv2.cvtColor(img_annot, cv2.COLOR_BGR2RGB)
105
- status = f"✅ Detected emotion: {pred_label}"
106
- return confidence, img_out, status
107
 
108
- except Exception as e:
109
- return {"Status": 1.0}, img_rgb, f"⚠ Inference failed: {e}"
110
 
111
 
112
  # ---- Gradio UI ----
@@ -117,7 +84,7 @@ with gr.Blocks(theme="default") as demo:
117
  with gr.Column(scale=1):
118
  image_input = gr.Image(
119
  type="pil",
120
- label="Upload Image or Use Webcam",
121
  sources=["upload", "webcam"],
122
  interactive=True,
123
  )
@@ -127,16 +94,13 @@ with gr.Blocks(theme="default") as demo:
127
  label="Draw Landmarks",
128
  interactive=True,
129
  )
130
- gr.Markdown(
131
- "Tip: Switch **Draw Landmarks** ON to visualize key points (if your `util.get_face_landmarks` draws them)."
132
- )
133
 
134
  with gr.Column(scale=1):
135
  label_output = gr.Label(num_top_classes=3, label="Predicted Emotion & Confidence")
136
- image_output = gr.Image(type="numpy", label="Annotated Image")
137
  status_output = gr.Textbox(label="Status", interactive=False)
138
 
139
- # Examples (ensure these files exist in /examples)
140
  gr.Examples(
141
  examples=[
142
  ["examples/happy.png", "OFF"],
@@ -147,7 +111,7 @@ with gr.Blocks(theme="default") as demo:
147
  label="Try examples",
148
  )
149
 
150
- # Real-time: changing either the image or the toggle re-runs inference automatically
151
  image_input.change(
152
  fn=predict_emotion,
153
  inputs=[image_input, draw_toggle],
 
4
  import pickle
5
  from functools import lru_cache
6
 
7
+
8
  try:
9
  from util import get_face_landmarks
10
  except Exception as e:
11
  raise ImportError(
12
  "Could not import 'get_face_landmarks' from util.py. "
 
13
  ) from e
14
 
15
 
 
37
  image: PIL.Image (from gr.Image with type='pil')
38
  draw_toggle: 'OFF' or 'ON'
39
  """
 
40
  if image is None:
41
  return {"Status": 1.0}, None, "Please upload an image."
42
 
43
  draw = (draw_toggle == "ON")
44
 
45
  # Convert PIL -> OpenCV BGR
46
+ img_rgb = np.array(image)
47
+ if img_rgb.ndim == 2:
48
+ img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_GRAY2RGB)
49
+ img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
50
+
51
+ # Extract landmarks
52
+ landmarks = get_face_landmarks(img_bgr, draw=draw, static_image_mode=True)
53
+
54
+ # Handle no-face case
 
 
 
 
 
 
55
  if landmarks is None or (hasattr(landmarks, "__len__") and len(landmarks) == 0):
56
+ return {"No face detected": 1.0}, img_rgb, "No face detected in the image."
57
 
58
  # Load model
59
+ model = load_model()
 
 
 
 
 
60
 
61
  # Predict
62
+ output = model.predict([landmarks])
63
+ pred_idx = int(output[0])
64
+ pred_label = EMOTIONS[pred_idx] if 0 <= pred_idx < len(EMOTIONS) else str(pred_idx)
65
+
66
+ if hasattr(model, "predict_proba"):
67
+ probs = model.predict_proba([landmarks])[0]
68
+ confidence = {EMOTIONS[i]: float(probs[i]) for i in range(len(EMOTIONS))}
69
+ else:
70
+ confidence = {pred_label: 1.0}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
 
72
+ # If draw_toggle is ON, landmarks drawn on img_bgr by util
73
+ img_out = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB) if draw else img_rgb
 
74
 
75
+ status = f" Detected emotion: {pred_label}"
76
+ return confidence, img_out, status
77
 
78
 
79
  # ---- Gradio UI ----
 
84
  with gr.Column(scale=1):
85
  image_input = gr.Image(
86
  type="pil",
87
+ label="Examples",
88
  sources=["upload", "webcam"],
89
  interactive=True,
90
  )
 
94
  label="Draw Landmarks",
95
  interactive=True,
96
  )
97
+
 
 
98
 
99
  with gr.Column(scale=1):
100
  label_output = gr.Label(num_top_classes=3, label="Predicted Emotion & Confidence")
101
+ image_output = gr.Image(type="numpy", label="Image Output")
102
  status_output = gr.Textbox(label="Status", interactive=False)
103
 
 
104
  gr.Examples(
105
  examples=[
106
  ["examples/happy.png", "OFF"],
 
111
  label="Try examples",
112
  )
113
 
114
+ # Real-time: change triggers inference
115
  image_input.change(
116
  fn=predict_emotion,
117
  inputs=[image_input, draw_toggle],