# Source: Hugging Face Space by awais0300 — "Update app.py" (commit a7cf946, verified)
import gradio as gr
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model
# Path to the trained Keras facial-expression checkpoint (HDF5 format).
MODEL_PATH = "model.h5"

# Load the classifier once at startup so every request reuses it.
model = load_model(MODEL_PATH)

# Class names indexed to match the model's softmax output order.
class_labels = [
    'Angry',
    'Disgust',
    'Fear',
    'Happy',
    'Sad',
    'Surprise',
    'Neutral',
]
# Function to preprocess an input image and predict the emotion
def predict_expression(image, camera_image=None):
    """Classify the facial expression shown in an image.

    Accepts up to two candidate images (upload and webcam capture) and uses
    the first one that is not None, so the function can be wired directly to
    a Gradio event that passes both image components as inputs.
    (Fix: the original single-parameter signature raised a TypeError when the
    click handler supplied two inputs.)

    Args:
        image: PIL.Image.Image or None — the uploaded image.
        camera_image: PIL.Image.Image or None — the webcam capture.

    Returns:
        str: the predicted class label, or "No image provided." when both
        inputs are None.
    """
    # Prefer the uploaded image; fall back to the camera capture.
    if image is None:
        image = camera_image
    if image is None:
        return "No image provided."
    # Convert to grayscale — the model expects a single-channel input.
    gray_image = image.convert("L")
    # Resize to the model's fixed 48x48 input resolution.
    resized_image = gray_image.resize((48, 48))
    # Scale pixels to [0, 1] and add batch + channel dimensions: (1, 48, 48, 1).
    img_array = np.array(resized_image) / 255.0
    img_array = img_array.reshape(1, 48, 48, 1)
    # Predict and map the argmax index back to its class name.
    prediction = model.predict(img_array)
    return class_labels[int(np.argmax(prediction))]
# Toggle which image component is visible based on the chosen input method
def toggle_input(method):
    """Return visibility updates for the (upload, camera) image components.

    Exactly one component is shown: the upload widget when *method* is
    "Upload", the webcam widget when it is "Camera".
    """
    show_upload = method == "Upload"
    show_camera = method == "Camera"
    return gr.update(visible=show_upload), gr.update(visible=show_camera)
# Gradio interface: a radio button toggles between an upload widget and a
# webcam widget; only the visible one supplies the image at predict time.
with gr.Blocks() as app:
    # NOTE(review): heading emoji was mojibake in the source ("๐Ÿ˜ƒ") —
    # restored to the intended 😃; confirm against the original file.
    gr.Markdown("## 😃 Facial Expression Recognition")

    input_method = gr.Radio(["Upload", "Camera"], label="Select Input Method", value="Upload")
    upload_image = gr.Image(label="Upload Image", type="pil", visible=True)
    camera_image = gr.Image(label="Camera Input", type="pil", visible=False, sources=["webcam"])
    predict_button = gr.Button("Predict Expression")
    output_label = gr.Label(label="Predicted Expression")

    # Swap visibility of the two image inputs when the radio selection changes.
    input_method.change(fn=toggle_input, inputs=input_method, outputs=[upload_image, camera_image])

    # BUG FIX: the original passed two inputs to a one-parameter function and
    # set preprocess=False, which delivers raw payloads instead of the PIL
    # images predict_expression expects. Let Gradio preprocess normally and
    # forward whichever image component actually holds a picture.
    predict_button.click(
        fn=lambda up, cam: predict_expression(up if up is not None else cam),
        inputs=[upload_image, camera_image],
        outputs=output_label,
    )

# Start the web server (blocks until the app is stopped).
app.launch()