import gradio as gr
import numpy as np
import tensorflow as tf
from tensorflow.keras.saving import load_model
import cv2

MODEL_PATH = "fer_surprise_softmax.keras"

# --------------------------
# 1) Load the model
# --------------------------
print("Loading model...")
model = load_model(MODEL_PATH, compile=False)
print("Model loaded!")
# --------------------------
# 2) Prediction function (real-time webcam)
# --------------------------
emotion_labels = ["angry", "disgust", "fear", "happy", "neutral", "sad", "surprise"]

def predict(frame, hr):
    if frame is None:
        return None, "No camera frame"

    # Preprocess: grayscale + resize to the model's 96x96 single-channel input
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # Gradio supplies RGB frames
    resized = cv2.resize(gray, (96, 96))
    img = resized.reshape(1, 96, 96, 1).astype("float32") / 255.0

    # Model prediction
    preds = model.predict(img, verbose=0)[0]
    top_idx = int(np.argmax(preds))
    top_label = emotion_labels[top_idx]
    surprise_score = float(preds[emotion_labels.index("surprise")])

    # Binary surprise flag
    surprise_flag = "Surprised 😲" if top_label == "surprise" else "Not Surprised"

    # Compose the output text
    result_text = (
        f"🧠 Top Emotion: {top_label}\n"
        f"😲 Surprise Probability: {surprise_score:.3f}\n"
        f"❤️ Heart Rate: {hr} BPM\n"
        f"Surprise Detection: {surprise_flag}"
    )
    return frame, result_text
# --------------------------
# 3) Gradio UI
# --------------------------
with gr.Blocks() as demo:
    gr.Markdown("# 😲 Real-Time Surprise Detector + ❤️ Heart Rate Monitor")
    with gr.Row():
        webcam = gr.Image(
            sources=["webcam"],
            streaming=True,
            label="Webcam Input",
            height=380
        )
        hr = gr.Number(label="Heart Rate (from Arduino)", value=0)
    out_image = gr.Image(label="Live Feed")
    out_text = gr.Textbox(label="Analysis Result")

    webcam.stream(predict, inputs=[webcam, hr], outputs=[out_image, out_text])

demo.launch()
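
The Heart Rate field above is a plain gr.Number that has to be typed in by hand, even though its label says the value comes from an Arduino. If the board streams the reading over USB serial, a small companion script could read it on the host machine. The sketch below is a minimal, hypothetical example using pyserial; the port name, baud rate, and the assumption that the Arduino prints one integer BPM per line are all placeholders, not part of the original app.

# --- Hypothetical companion script (not part of app.py above) ---
# Reads BPM values from an Arduino over serial; assumes the Arduino sketch
# prints one integer BPM per line, which is NOT shown in the original app.
import serial  # pip install pyserial

def read_bpm(port="/dev/ttyUSB0", baudrate=9600, timeout=2.0):
    """Return one BPM reading from the Arduino, or None if the line was not a number."""
    with serial.Serial(port, baudrate, timeout=timeout) as ser:
        line = ser.readline().decode("ascii", errors="ignore").strip()
        return int(line) if line.isdigit() else None

if __name__ == "__main__":
    bpm = read_bpm()
    print(f"Heart rate: {bpm} BPM")

A value read this way would still have to be entered into the Heart Rate field (or wired into the app with extra plumbing), since gr.Number does not poll hardware on its own.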