Spaces:
Runtime error
Runtime error
Initial Commit
Browse files
app.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import cv2
|
| 3 |
+
import tensorflow as tf
|
| 4 |
+
import numpy as np
|
| 5 |
+
from tensorflow.keras.preprocessing.image import ImageDataGenerator
|
| 6 |
+
|
| 7 |
+
# ---- Model setup (module level) ----

# Load the pre-trained emotion-classification model.
# FIX(review): the original used the absolute path '/Trained_Model.h5',
# which points at the filesystem root. The weights file ships alongside
# app.py in the Space repo, so a working-directory-relative path is correct.
model = tf.keras.models.load_model('Trained_Model.h5')

# Class index -> human-readable emotion name. The order must match the
# model's 7-way softmax output layer — TODO confirm against training code.
emotion_labels = {
    0: 'Angry',
    1: 'Disgust',
    2: 'Fear',
    3: 'Happy',
    4: 'Neutral',
    5: 'Sad',
    6: 'Surprise',
}

# Generator used only for its standardize() step: rescales pixel values
# into [0, 1] to match the preprocessing used at training time.
img_gen = ImageDataGenerator(rescale=1./255)
|
| 23 |
+
|
| 24 |
+
# Define the function to predict emotions
def predict_emotion(file):
    """Predict the dominant emotion in an uploaded image or video.

    Opens *file* with cv2.VideoCapture (which handles both still images
    and videos), grabs the first frame, preprocesses it to the 48x48
    grayscale tensor the model expects, and returns the emotion label
    with the highest softmax probability.

    Parameters:
        file: uploaded file object exposing a ``.name`` path attribute.

    Returns:
        str: the predicted emotion label, or a human-readable error
        message if the file cannot be opened or contains no frames
        (the original contract returns strings instead of raising).
    """
    cap = cv2.VideoCapture(file.name)
    try:
        # Guard clauses replace the original nested if/else pyramid.
        if not cap.isOpened():
            return "Could not open the file"
        ret, frame = cap.read()
        if frame is None:
            return "No frames found in the video"
        # Preprocess: grayscale, 48x48, shape (1, 48, 48, 1), float32,
        # then rescaled to [0, 1] by the generator's standardize step.
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, (48, 48))
        img = np.expand_dims(img, axis=-1)
        img = np.expand_dims(img, axis=0)
        img = img.astype('float32')
        img = img_gen.standardize(img)
        # Pick the class with the highest predicted probability.
        prediction = model.predict(img)
        return emotion_labels[np.argmax(prediction)]
    finally:
        # FIX(review): release the capture handle on every path — the
        # original leaked it, which accumulates OS resources per request.
        cap.release()
|
| 47 |
+
|
| 48 |
+
# ---- Gradio interface ----
# FIX(review): the gr.inputs / gr.outputs namespaces were deprecated in
# Gradio 2.x and removed in 3.x, so the original calls raise
# AttributeError at import time on a current Space — the likely cause of
# the "Runtime error" banner. Component classes now live directly on gr.
input_type = gr.File(label="Upload an image or video to predict emotions")
output_type = gr.Textbox(label="Predicted emotion")
title = "Emotion Detection"
description = "Upload an image or video to predict the corresponding emotion"
iface = gr.Interface(
    fn=predict_emotion,
    inputs=input_type,
    outputs=output_type,
    title=title,
    description=description,
)

if __name__ == '__main__':
    iface.launch(inline=False)
|