Spaces:
Runtime error
Runtime error
Delete app2.py
Browse files
app2.py
DELETED
|
@@ -1,56 +0,0 @@
|
|
| 1 |
-
import gradio as gr
|
| 2 |
-
import tensorflow as tf
|
| 3 |
-
import numpy as np
|
| 4 |
-
from PIL import Image # Pillow for image processing
|
| 5 |
-
import os
|
| 6 |
-
|
| 7 |
-
# Hugging Face Space Link: https://huggingface.co/spaces/YourUsername/YourSpaceName
|
| 8 |
-
|
| 9 |
-
# Load the trained Keras model from the repo root; this runs at import time,
# so the Space fails fast if the weights file is missing.
model = tf.keras.models.load_model('facial_emotion_model.h5')

# Define the class labels explicitly, as train_generator is not available in deployment
# These labels should match the order your model was trained on.
# NOTE(review): these are numeric-string placeholders ('0'..'18'), not emotion
# names — presumably they should be replaced with the real class names in the
# training generator's order; verify against the training notebook.
class_labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18']

# Define target image size used during training (width, height) — must match
# the input resolution the model was trained on.
TARGET_SIZE = (128, 128)
def preprocess_image(image) -> np.ndarray:
    """Convert a PIL image into a normalized, model-ready batch.

    The image is resized to TARGET_SIZE, its pixel values are scaled to
    [0, 1], and a leading batch axis of size 1 is added.
    """
    resized = image.resize(TARGET_SIZE)
    # Scale 0-255 pixel values into the [0, 1] range the model was trained on.
    pixels = np.asarray(resized) / 255.0
    # Shape becomes (1, height, width, channels) — a single-image batch.
    return pixels[np.newaxis, ...]
|
| 30 |
-
|
| 31 |
-
def predict_emotion(image) -> dict:
    """Classify the emotion shown in a face image.

    Returns a mapping from each class label to its predicted probability.
    If no image is supplied, every label maps to 0.0.
    """
    if image is None:
        return dict.fromkeys(class_labels, 0.0)

    batch = preprocess_image(image)
    # First (and only) row of the batched prediction output.
    scores = model.predict(batch)[0]

    labeled = {}
    for idx, label in enumerate(class_labels):
        labeled[label] = float(scores[idx])
    return labeled
|
| 45 |
-
|
| 46 |
-
# Assemble the Gradio demo: one image in, a ranked label distribution out.
image_input = gr.Image(type="pil", label="Upload Face Image")
label_output = gr.Label(num_top_classes=len(class_labels))

iface = gr.Interface(
    fn=predict_emotion,
    inputs=image_input,
    outputs=label_output,
    title="Facial Emotion Recognition",
    description="Upload an image of a face to get emotion predictions.",
)

# Start the web server for the Space.
iface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|