AyoAgbaje committed on
Commit
5eab21d
·
verified ·
1 Parent(s): 3927f88

Update app.py

Browse files

Sad update to depressed

Files changed (1) hide show
  1. app.py +43 -43
app.py CHANGED
@@ -1,44 +1,44 @@
1
- import gradio as gr
2
- import os
3
- import numpy as np
4
- import tensorflow as tf
5
- import keras
6
- import keras_cv
7
- from keras.models import load_model
8
- import cv2
9
-
10
-
11
- def image_predict(img_):
12
- model = load_model('efficientnet_b0.keras')
13
- img = cv2.cvtColor(img_, cv2.COLOR_BGR2RGB)
14
- img = cv2.resize(img, dsize = [224, 224])
15
- img = img / 255.0
16
- img = np.expand_dims(img, axis = 0)
17
-
18
- pred = model.predict(img, verbose = 1)
19
- pred = np.argmax(pred, axis = 1)
20
-
21
- classes = ['angry', 'happy', 'neutral', 'sad', 'suprised', 'tired']
22
- if pred == 0:
23
- answer = f"Facial Expression detected is: {classes[0].capitalize()}"
24
- elif pred == 1:
25
- answer = f"Facial Expression detected is: {classes[1].capitalize()}"
26
- elif pred == 2:
27
- answer = f"Facial Expression detected is: {classes[2].capitalize()}"
28
- elif pred == 3:
29
- answer = f"Facial Expression detected is: {classes[3].capitalize()}"
30
- elif pred == 4:
31
- answer = f"Facial Expression detected is: {classes[4].capitalize()}"
32
- elif pred == 5:
33
- answer = f"Facial Expression detected is: {classes[5].capitalize()}"
34
-
35
- return answer
36
-
37
-
38
- with gr.Blocks() as demo:
39
- image_ = gr.Image(label = 'Input Image to be predicted')
40
- output = gr.Textbox(label = 'Prediction')
41
- btn = gr.Button('Predict')
42
- btn.click(fn = image_predict, inputs = [image_], outputs = output)
43
-
44
  demo.launch(share = False)
 
1
+ import gradio as gr
2
+ import os
3
+ import numpy as np
4
+ import tensorflow as tf
5
+ import keras
6
+ import keras_cv
7
+ from keras.models import load_model
8
+ import cv2
9
+
10
+
11
def image_predict(img_):
    """Predict the facial expression shown in an input image.

    Parameters
    ----------
    img_ : numpy.ndarray
        Image array as delivered by the Gradio ``Image`` component
        (assumed BGR channel order here, since it is converted with
        ``COLOR_BGR2RGB`` — NOTE(review): Gradio typically supplies RGB;
        confirm the conversion is intended).

    Returns
    -------
    str
        Human-readable description of the detected expression.
    """
    # NOTE(review): the model is re-loaded on every prediction; loading it
    # once at module level would be much faster. Kept as-is to preserve
    # the original lazy-load behavior.
    model = load_model('efficientnet_b0.keras')

    # Preprocess: convert channel order, resize to the network's 224x224
    # input, scale pixels to [0, 1], and add the batch dimension.
    img = cv2.cvtColor(img_, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, dsize=(224, 224))
    img = img / 255.0
    img = np.expand_dims(img, axis=0)

    pred = model.predict(img, verbose=1)
    # argmax over axis=1 returns an array; take the scalar index for the
    # single batch item (the original compared the whole array with ==).
    idx = int(np.argmax(pred, axis=1)[0])

    # Display labels by class index. Index 3 ('sad') is deliberately shown
    # as 'Depressed' (this commit's change); 'Suprised' spelling is kept
    # to preserve the original user-facing string.
    labels = ['Angry', 'Happy', 'Neutral', 'Depressed', 'Suprised', 'Tired']

    # Table lookup replaces the six-branch if/elif chain and guards against
    # an out-of-range index (the original left `answer` unbound in that
    # case, raising NameError on return).
    if 0 <= idx < len(labels):
        return f"Facial Expression detected is: {labels[idx]}"
    return "Facial Expression detected is: Unknown"
36
+
37
+
38
# Minimal Gradio UI: an image input, a textbox for the prediction, and a
# button that routes the image through image_predict.
with gr.Blocks() as demo:
    input_image = gr.Image(label='Input Image to be predicted')
    prediction_box = gr.Textbox(label='Prediction')
    predict_btn = gr.Button('Predict')
    predict_btn.click(fn=image_predict, inputs=[input_image], outputs=prediction_box)

# Serve locally only (no public share link).
demo.launch(share=False)