Spaces:
Sleeping
Sleeping
modify prediction method, add webcam and image prediction
Browse files- __pycache__/prediction.cpython-39.pyc +0 -0
- app.py +6 -2
- prediction.py +91 -0
__pycache__/prediction.cpython-39.pyc
CHANGED
|
Binary files a/__pycache__/prediction.cpython-39.pyc and b/__pycache__/prediction.cpython-39.pyc differ
|
|
|
app.py
CHANGED
|
@@ -2,6 +2,7 @@ import streamlit as st
|
|
| 2 |
from streamlit_option_menu import option_menu
|
| 3 |
from streamlit_webrtc import webrtc_streamer, VideoTransformerBase
|
| 4 |
from prediction import predict_emotion
|
|
|
|
| 5 |
import eda
|
| 6 |
import model_result
|
| 7 |
|
|
@@ -27,7 +28,8 @@ with st.sidebar:
|
|
| 27 |
"Distribution",
|
| 28 |
"Image Sample",
|
| 29 |
"Model Result",
|
| 30 |
-
"Classification",
|
|
|
|
| 31 |
],
|
| 32 |
icons=["bar-chart", "link-45deg", "code-square"],
|
| 33 |
menu_icon="cast",
|
|
@@ -40,5 +42,7 @@ elif selected == "Image Sample":
|
|
| 40 |
eda.samples()
|
| 41 |
elif selected == "Model Result":
|
| 42 |
model_result.report()
|
| 43 |
-
elif selected == "Classification":
|
| 44 |
main() # Call the main function for emotion detection
|
|
|
|
|
|
|
|
|
| 2 |
from streamlit_option_menu import option_menu
|
| 3 |
from streamlit_webrtc import webrtc_streamer, VideoTransformerBase
|
| 4 |
from prediction import predict_emotion
|
| 5 |
+
import prediction
|
| 6 |
import eda
|
| 7 |
import model_result
|
| 8 |
|
|
|
|
| 28 |
"Distribution",
|
| 29 |
"Image Sample",
|
| 30 |
"Model Result",
|
| 31 |
+
"Webcam Classification",
|
| 32 |
+
"Image Classification"
|
| 33 |
],
|
| 34 |
icons=["bar-chart", "link-45deg", "code-square"],
|
| 35 |
menu_icon="cast",
|
|
|
|
| 42 |
eda.samples()
|
| 43 |
elif selected == "Model Result":
|
| 44 |
model_result.report()
|
| 45 |
+
elif selected == "Webcam Classification":
|
| 46 |
main() # Call the main function for emotion detection
|
| 47 |
+
elif selected == "Image Classification":
|
| 48 |
+
prediction.image_prediction()
|
prediction.py
CHANGED
|
@@ -1,9 +1,13 @@
|
|
|
|
|
| 1 |
import cv2
|
| 2 |
import numpy as np
|
|
|
|
| 3 |
from tensorflow.keras.preprocessing import image
|
|
|
|
| 4 |
from keras.models import load_model
|
| 5 |
import pandas as pd
|
| 6 |
|
|
|
|
| 7 |
# Load pre-trained emotion classification model
|
| 8 |
emotion_classification_model = load_model('./model/model_fine_tune.h5') # Replace with actual path
|
| 9 |
|
|
@@ -52,3 +56,90 @@ def predict_emotion(frame):
|
|
| 52 |
cv2.putText(img, f'Emotion: {pred_class_single[0]}', (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
|
| 53 |
|
| 54 |
return img # Return the annotated frame
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Needed by image_prediction(): it downloads an image from a URL and decodes
# it from an in-memory byte buffer. These were referenced but never imported.
from io import BytesIO
from urllib import request

from tensorflow.keras.preprocessing import image
# Import load_model from tensorflow.keras only. The original imported it a
# second time from the standalone `keras` package, which silently shadowed
# this import and can mix incompatible Keras versions in one process.
from tensorflow.keras.models import load_model

# Load pre-trained emotion classification model once at import time.
emotion_classification_model = load_model('./model/model_fine_tune.h5')  # Replace with actual path
|
| 13 |
|
|
|
|
| 56 |
cv2.putText(img, f'Emotion: {pred_class_single[0]}', (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
|
| 57 |
|
| 58 |
return img # Return the annotated frame
|
| 59 |
+
|
| 60 |
+
def image_prediction():
    """Streamlit page: classify the emotion on a face image fetched from a URL.

    Renders a URL text input and a Predict button. On click (or on first
    load, using a built-in sample image) the image is downloaded, resized to
    the model's 48x48 input, run through the pre-trained emotion classifier,
    and displayed next to the predicted class and per-class probabilities.

    Side effects only (Streamlit widgets + network fetch); returns None.
    """
    # Local imports: these names were used but never imported at module level,
    # which raised NameError the first time a prediction ran.
    from io import BytesIO
    from urllib import request

    # Reuse the model loaded once at module import time instead of re-reading
    # the .h5 file from disk on every Streamlit rerun (load_model is slow).
    model = emotion_classification_model

    # Order must match the model's output units.
    class_labels = ["angry", "disgusted", "fearful", "happy", "neutral", "sad", "surprised"]

    # Sample image shown before the user submits their own URL.
    default_url = "https://lh3.googleusercontent.com/0e8O0JPOp_ydq7wqv6kgYz6UpF9w_INfnOLIhnJZBEHFcWIygkuLa3SVghhGYgE0XWzQYBPb6wb1eQFN0pVIAYlzEeNojYuCWg=s0"

    def img_url(url):
        # Download once, decode twice: full-size copy for display and a
        # 48x48 copy (aspect-ratio preserving crop) for the model.
        res = request.urlopen(url).read()
        img_ori = image.load_img(BytesIO(res))
        img = image.load_img(BytesIO(res), target_size=(48, 48), keep_aspect_ratio=True)
        show_predict(img, img_ori)

    def show_predict(img, img_ori):
        col1, col2 = st.columns(2)

        # Left column: the original, un-resized image.
        fig = plt.figure()
        plt.imshow(img_ori)
        plt.axis('off')
        col1.pyplot(fig)

        img_array = image.img_to_array(img)
        img_array = np.expand_dims(img_array, axis=0)
        # NOTE(review): dividing by 255*117 is unusual — standard scaling is
        # /255. Kept as-is to match whatever preprocessing the model was
        # fine-tuned with; confirm against the training pipeline.
        img_array /= (255 * 117)

        probs = model.predict(img_array)[0]

        # Percentage strings for the results table, e.g. '87.5%'.
        percentages = [f'{(p * 100).round(2)}%' for p in probs]

        # Indices of the two highest-probability classes, best first.
        top_idx = (-np.array(probs)).argsort()[:2]
        pred_class_single = pd.DataFrame(class_labels).loc[top_idx][0].tolist()

        prediction_result_single = pd.DataFrame(columns=class_labels)
        prediction_result_single.loc[len(prediction_result_single)] = percentages

        st.markdown("""
        <style>
        .big-font {
            font-size:30px !important;
        }
        </style>
        """, unsafe_allow_html=True)

        # Right column: top class plus the full probability table.
        col2.write('Prediction Class:')
        col2.markdown(f'<p class="big-font">{pred_class_single[0].capitalize()}</p>', unsafe_allow_html=True)
        col2.dataframe(prediction_result_single.set_index(prediction_result_single.columns[0]), use_container_width=True)

    st.write('Insert Image URL Below (Make sure face is centered and fitted)')
    st.markdown('[Example Image](https://cdn.idntimes.com/content-images/community/2021/12/whatsapp-image-2021-12-02-at-190446-8ecf63e1fa6b5c8c5e9ac43034bc86d3-c563813ea99f16a795ad4c53af10881a_600x400.jpeg)')

    col1, col2 = st.columns((9, 1))
    url_input = col1.text_input(label="Image Links")

    # Vertically align the Predict button with the adjacent text input.
    st.markdown(
        """
        <style>
        button {
            height: auto;
            margin-top: 28px !important;
            padding-left: 24px !important;
            padding-right: 24px !important;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )
    pred_button = col2.button(label="Predict")

    # Fall back to the sample image when no URL has been entered yet —
    # previously an empty submission crashed urlopen with an invalid URL.
    if pred_button and url_input:
        img_url(url_input)
    else:
        img_url(default_url)
|