3v324v23 committed on
Commit
dd980a0
·
1 Parent(s): d11e0da

add new prediction method

Browse files
__pycache__/model_result.cpython-39.pyc CHANGED
Binary files a/__pycache__/model_result.cpython-39.pyc and b/__pycache__/model_result.cpython-39.pyc differ
 
__pycache__/prediction.cpython-39.pyc CHANGED
Binary files a/__pycache__/prediction.cpython-39.pyc and b/__pycache__/prediction.cpython-39.pyc differ
 
app.py CHANGED
@@ -1,14 +1,24 @@
1
  import streamlit as st
 
 
 
2
  import eda
3
  import model_result
4
- import prediction
5
- from streamlit_option_menu import option_menu
6
-
7
-
8
 
9
  st.sidebar.header("Emotion Classification")
10
  st.title("Facial Emotion Classification")
11
 
 
 
 
 
 
 
 
 
 
 
 
12
  with st.sidebar:
13
  st.write("Ediashta Revindra - FTDS-020")
14
  selected = option_menu(
@@ -29,6 +39,6 @@ if selected == "Distribution":
29
  elif selected == "Image Sample":
30
  eda.samples()
31
  elif selected == "Model Result":
32
- model_result.report()
33
  elif selected == "Classification":
34
- prediction.predict()
 
1
  import streamlit as st
2
+ from streamlit_option_menu import option_menu
3
+ from streamlit_webrtc import webrtc_streamer, VideoTransformerBase
4
+ from prediction import predict_emotion
5
  import eda
6
  import model_result
 
 
 
 
7
 
8
  st.sidebar.header("Emotion Classification")
9
  st.title("Facial Emotion Classification")
10
 
11
class EmotionDetectionTransformer(VideoTransformerBase):
    """streamlit-webrtc transformer that annotates every incoming webcam
    frame with the facial emotion predicted by ``prediction.predict_emotion``."""

    def transform(self, frame):
        # Delegate detection + drawing to the prediction module and hand
        # its annotated ndarray straight back to the streamer.
        return predict_emotion(frame)
15
+
16
def main():
    """Render the live emotion-detection page and start the webcam stream."""
    st.title('Emotion Detection App')
    st.write("Press Start")

    # Pipe webcam frames through the per-frame emotion annotator.
    webrtc_streamer(key="example", video_transformer_factory=EmotionDetectionTransformer)
21
+
22
  with st.sidebar:
23
  st.write("Ediashta Revindra - FTDS-020")
24
  selected = option_menu(
 
39
  elif selected == "Image Sample":
40
  eda.samples()
41
  elif selected == "Model Result":
42
+ model_result.report()
43
  elif selected == "Classification":
44
+ main() # Call the main function for emotion detection
model/haarcascade_frontalface_default.xml ADDED
The diff for this file is too large to render. See raw diff
 
prediction.py CHANGED
@@ -1,45 +1,41 @@
1
- import streamlit as st
2
- import pandas as pd
3
  import numpy as np
4
- import pickle
5
- import tensorflow as tf
6
- import matplotlib.pyplot as plt
7
- from tensorflow.keras.layers import Dense, Input, concatenate
8
- from tensorflow.keras.models import load_model
9
  from tensorflow.keras.preprocessing import image
10
- from PIL import Image
11
- from urllib import request
12
- from io import BytesIO
13
-
14
-
15
- def predict():
16
-
17
- emotion_classification_model = load_model('./model/model_fine_tune.h5')
18
-
19
-
20
- url = "https://lh3.googleusercontent.com/0e8O0JPOp_ydq7wqv6kgYz6UpF9w_INfnOLIhnJZBEHFcWIygkuLa3SVghhGYgE0XWzQYBPb6wb1eQFN0pVIAYlzEeNojYuCWg=s0"
21
-
22
- def img_url(url):
23
- res = request.urlopen(url).read()
24
- img_ori = image.load_img(BytesIO(res))
25
- img = image.load_img(BytesIO(res), target_size=(48, 48), keep_aspect_ratio=True)
26
- show_predict(img, img_ori)
27
-
28
- def show_predict(img, img_ori):
29
- col1, col2 = st.columns(2)
30
- fig = plt.figure()
31
- plt.imshow(img_ori)
32
- plt.axis('off')
33
- col1.pyplot(fig)
34
-
35
- img_array = image.img_to_array(img)
36
  img_array = np.expand_dims(img_array, axis=0)
37
  img_array /= (255*117) # Normalize the image
38
 
 
39
  inf_pred_single = emotion_classification_model.predict(img_array)
40
-
41
- data_inf_single = []
42
 
 
 
 
43
  rank = []
44
 
45
  for i in inf_pred_single[0]:
@@ -48,57 +44,11 @@ def predict():
48
  data_inf_single.append(f'{value.round(2)}%')
49
 
50
  rank = (-np.array(rank)).argsort()[:2]
51
-
52
  pred_class_single = pd.DataFrame(class_labels).loc[rank][0].tolist()
53
 
54
- prediction_result_single = pd.DataFrame(columns=["angry", "disgusted", "fearful", "happy", "neutral", "sad", "surprised"])
55
- prediction_result_single.loc[len(prediction_result_single)] = data_inf_single
56
-
57
- prediction_result_single
58
-
59
- st.markdown("""
60
- <style>
61
- .big-font {
62
- font-size:30px !important;
63
- }
64
- </style>
65
- """, unsafe_allow_html=True)
66
-
67
- col2.write('Prediction Class:')
68
- col2.markdown(f'<p class="big-font">{pred_class_single[0].capitalize()}</p>', unsafe_allow_html=True)
69
-
70
- col2.dataframe(prediction_result_single.set_index(prediction_result_single.columns[0]), use_container_width=True)
71
-
72
- class_labels = ["angry", "disgusted", "fearful", "happy", "neutral", "sad", "surprised"]
73
-
74
- st.write('Insert Image URL Below (Make sure face is centered and fitted)')
75
-
76
- st.markdown('[Example Image](https://cdn.idntimes.com/content-images/community/2021/12/whatsapp-image-2021-12-02-at-190446-8ecf63e1fa6b5c8c5e9ac43034bc86d3-c563813ea99f16a795ad4c53af10881a_600x400.jpeg)')
77
-
78
- col1, col2 = st.columns((9,1))
79
- url_input = col1.text_input(label="Image Links")
80
-
81
- st.markdown(
82
- """
83
- <style>
84
- button {
85
- height: auto;
86
- margin-top: 28px !important;
87
- padding-left: 24px !important;
88
- padding-right: 24px !important;
89
- }
90
- </style>
91
- """,
92
- unsafe_allow_html=True,
93
- )
94
- pred_button = col2.button(label="Predict")
95
-
96
- if pred_button:
97
- img_url(url_input)
98
- else:
99
- img_url(url)
100
-
101
-
102
-
103
- if __name__ == "__main__":
104
- predict()
 
1
import cv2
import numpy as np
from tensorflow.keras.preprocessing import image
from keras.models import load_model
import pandas as pd

# Load the pre-trained emotion-classification CNN once at import time.
# BUGFIX: use forward slashes — the previous '.\model\...' literals contain
# invalid escape sequences (\m, \h) and break on Linux hosts (e.g. HF Spaces);
# forward slashes work on every platform.
emotion_classification_model = load_model('./model/model_fine_tune.h5')

# Haar-cascade face detector shipped in this repo's model/ directory.
face_cascade = cv2.CascadeClassifier('./model/haarcascade_frontalface_default.xml')

# Output classes, in the order the model's softmax emits them.
class_labels = ["angry", "disgusted", "fearful", "happy", "neutral", "sad", "surprised"]
14
+
15
+ def predict_emotion(frame):
16
+ img = frame.to_ndarray(format="bgr24")
17
+
18
+ gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
19
+
20
+ # Detect faces
21
+ faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
22
+
23
+ # Process each detected face
24
+ for (x, y, w, h) in faces:
25
+ face_img = img[y:y+h, x:x+w]
26
+
27
+ # Preprocess the face image
28
+ resized_face_img = cv2.resize(face_img, (48, 48))
29
+ img_array = image.img_to_array(resized_face_img)
30
  img_array = np.expand_dims(img_array, axis=0)
31
  img_array /= (255*117) # Normalize the image
32
 
33
+ # Perform emotion prediction using the loaded model
34
  inf_pred_single = emotion_classification_model.predict(img_array)
 
 
35
 
36
+ max_pred_single = np.argsort(inf_pred_single[0])[-2:][::-1]
37
+
38
+ data_inf_single = []
39
  rank = []
40
 
41
  for i in inf_pred_single[0]:
 
44
  data_inf_single.append(f'{value.round(2)}%')
45
 
46
  rank = (-np.array(rank)).argsort()[:2]
47
+
48
  pred_class_single = pd.DataFrame(class_labels).loc[rank][0].tolist()
49
 
50
+ # Draw bounding box and emotion label on the frame
51
+ cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
52
+ cv2.putText(img, f'Emotion: {pred_class_single[0]}', (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
53
+
54
+ return img # Return the annotated frame
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -6,4 +6,5 @@ Pillow
6
  scikit-learn==1.2.2
7
  feature_engine
8
  streamlit_option_menu
 
9
  tensorflow ==2.13.0
 
6
  scikit-learn==1.2.2
7
  feature_engine
8
  streamlit_option_menu
9
+ streamlit-webrtc
10
  tensorflow ==2.13.0