FarazAli34 commited on
Commit
425297c
·
verified ·
1 Parent(s): ada7725

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -20
app.py CHANGED
@@ -3,32 +3,43 @@ import tensorflow as tf
3
  import cv2
4
  import numpy as np
5
 
6
- # Load the trained model
7
- model = tf.keras.models.load_model("FER_DATA.keras")
 
8
 
9
- # Define a function to predict emotion
 
 
 
 
 
 
10
  def predict_emotion(image):
11
- # Convert image to grayscale
12
  image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
13
  image = cv2.resize(image, (48, 48))
14
  image = image / 255.0
15
  image = np.expand_dims(image, axis=-1)
16
- image = np.expand_dims(image, axis=0)
 
 
 
 
 
 
 
 
 
17
 
18
- # Predict emotion
19
- prediction = model.predict(image)
20
- emotion_index = np.argmax(prediction)
 
 
21
  emotions = ['Angry', 'Happy', 'Sad', 'Neutral']
22
- return emotions[emotion_index]
23
-
24
- # Create Gradio interface
25
- iface = gr.Interface(
26
- fn=predict_emotion,
27
- inputs=gr.Image(type="numpy", shape=(224, 224)),
28
- outputs="text",
29
- title="MoodSync - Emotion Detection",
30
- description="Upload an image of a face to detect the emotion!"
31
- )
32
-
33
- # Launch the app
34
  iface.launch()
 
3
  import cv2
4
  import numpy as np
5
 
6
# Load the TFLite model and allocate its tensors once at import time,
# so every prediction reuses the same interpreter instance.
interpreter = tf.lite.Interpreter(model_path="FER_DATA.tflite")
interpreter.allocate_tensors()
9
 
10
def get_input_output_tensors(interpreter):
    """Return the (input_details, output_details) pair for a TFLite interpreter.

    Thin convenience wrapper around the interpreter's own detail accessors;
    each element is the list of tensor-detail dicts TFLite provides.
    """
    return interpreter.get_input_details(), interpreter.get_output_details()
15
+
16
# Function to predict emotion
def predict_emotion(image):
    """Classify the facial expression in an RGB image via the TFLite model.

    Parameters
    ----------
    image : numpy.ndarray or None
        RGB image as delivered by Gradio; ``None`` when the user submits
        without uploading an image.

    Returns
    -------
    str
        One of 'Angry', 'Happy', 'Sad', 'Neutral', or a hint message when
        no image was provided.
    """
    # FIX: Gradio passes None when no image is uploaded; the original code
    # crashed inside cv2.cvtColor in that case.
    if image is None:
        return "No image provided"

    # Preprocess: grayscale, 48x48, scale to [0, 1] float32, and add
    # batch and channel axes -> shape (1, 48, 48, 1) as the model expects.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    image = cv2.resize(image, (48, 48))
    image = (image / 255.0).astype(np.float32)
    image = image[np.newaxis, :, :, np.newaxis]

    # Run TFLite inference on the preprocessed frame.
    input_details, output_details = get_input_output_tensors(interpreter)
    interpreter.set_tensor(input_details[0]['index'], image)
    interpreter.invoke()
    output = interpreter.get_tensor(output_details[0]['index'])

    # Highest-scoring class index maps into the label list.
    # NOTE(review): label order assumed to match the training labels of
    # FER_DATA.tflite — confirm against the training pipeline.
    emotions = ['Angry', 'Happy', 'Sad', 'Neutral']
    return emotions[int(np.argmax(output))]
41
+
42
# Define the Gradio interface: image in, predicted emotion label out.
# FIX: gr.Image(shape=...) was removed in Gradio 4.x and raises TypeError;
# resizing is already handled inside predict_emotion, so no input shape
# constraint is needed here. type="numpy" is passed explicitly because
# predict_emotion operates on a numpy array (cv2 preprocessing).
iface = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Image(type="numpy"),
    outputs="text",
    title="MoodSync - Emotion Detection",
)

# Start the web app.
iface.launch()