# NOTE: Hugging Face Spaces build-log residue ("Spaces: Build error") removed;
# it was not part of the program source.
| # import numpy as np | |
| # import cv2 | |
| # import streamlit as st | |
| # from tensorflow import keras | |
| # from keras.models import model_from_json | |
| # from tensorflow.keras.utils import img_to_array | |
| # from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, RTCConfiguration, VideoProcessorBase, WebRtcMode | |
| import numpy as np | |
| import tensorflow as tf | |
| from PIL import Image | |
| import cv2 | |
| import streamlit as st | |
| from tensorflow import keras | |
| from keras.models import model_from_json | |
| from tensorflow.keras.utils import img_to_array | |
| from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, RTCConfiguration, VideoProcessorBase, WebRtcMode | |
# Mapping from the CNN's output index to a human-readable emotion label.
emotion_dict = {0: 'angry', 1: 'happy', 2: 'neutral', 3: 'sad', 4: 'surprise'}

# Load the model architecture from JSON, then its trained weights.
# BUG FIX: use a context manager so the file handle is closed even if
# read() raises (the original open()/close() pair leaked on error).
with open('emotion_model1.json', 'r') as json_file:
    loaded_model_json = json_file.read()
classifier = model_from_json(loaded_model_json)
classifier.load_weights("emotion_model1.h5")

# Haar cascade for face detection.
# NOTE(review): cv2.CascadeClassifier does not raise when the XML file is
# missing — it silently returns an empty classifier — so this except branch
# is best-effort only; check face_cascade.empty() to detect a bad load.
try:
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
except Exception:
    st.write("Error loading cascade classifiers")

# Public STUN server so WebRTC can traverse NAT when streaming the webcam.
RTC_CONFIGURATION = RTCConfiguration({"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]})
class Faceemotion(VideoTransformerBase):
    """streamlit-webrtc video transformer that boxes each detected face in
    the incoming webcam frame and labels it with the predicted emotion."""

    def transform(self, frame):
        """Process one webcam frame.

        Parameters
        ----------
        frame : streamlit-webrtc video frame
            Converted to a BGR ndarray for OpenCV processing.

        Returns
        -------
        numpy.ndarray
            The frame with rectangles and emotion labels drawn on it.
        """
        img = frame.to_ndarray(format="bgr24")
        # Haar cascades operate on grayscale images.
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(
            image=img_gray, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img=img, pt1=(x, y), pt2=(x + w, y + h),
                          color=(255, 0, 0), thickness=2)
            roi_gray = img_gray[y:y + h, x:x + w]
            # 48x48 matches the input size the emotion CNN was trained on.
            roi_gray = cv2.resize(roi_gray, (48, 48),
                                  interpolation=cv2.INTER_AREA)
            # Skip all-black crops, which carry no usable face information.
            if np.sum([roi_gray]) != 0:
                roi = roi_gray.astype('float') / 255.0
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)
                prediction = classifier.predict(roi)[0]
                maxindex = int(np.argmax(prediction))
                output = str(emotion_dict[maxindex])
                # BUG FIX: the original drew the literal string 'i' instead
                # of the predicted emotion label (cv2.putText(img, 'i', ...)).
                cv2.putText(img, output, (x, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        return img
def generate_prediction(input_image):
    """Detect faces in a still image and annotate each with its emotion.

    Parameters
    ----------
    input_image : numpy.ndarray
        Image array; it is modified IN PLACE (rectangles and labels are
        drawn directly on it).
        NOTE(review): callers pass np.array(Image.open(...)), i.e. RGB (or
        RGBA for some PNGs) rather than OpenCV's BGR — cvtColor(BGR2GRAY)
        still produces a usable grayscale, but verify 3-channel input.

    Returns
    -------
    numpy.ndarray
        The same array, annotated.
    """
    img = input_image
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(
        image=img_gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img=img, pt1=(x, y), pt2=(x + w, y + h),
                      color=(255, 0, 0), thickness=2)
        roi_gray = img_gray[y:y + h, x:x + w]
        # 48x48 matches the input size the emotion CNN was trained on.
        roi_gray = cv2.resize(roi_gray, (48, 48),
                              interpolation=cv2.INTER_AREA)
        # Skip all-black crops, which carry no usable face information.
        if np.sum([roi_gray]) != 0:
            roi = roi_gray.astype('float') / 255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)
            prediction = classifier.predict(roi)[0]
            maxindex = int(np.argmax(prediction))
            output = str(emotion_dict[maxindex])
            # BUG FIX: draw the label only when a prediction was actually
            # made; previously `output` could be undefined (NameError) or
            # stale from a previous face when the ROI was all-zero.
            cv2.putText(img, output, (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    return img
def main():
    """Streamlit entry point.

    Renders a sidebar selector with four activities: a home page, live
    webcam emotion detection (via streamlit-webrtc), still-image
    prediction from an upload, and an about page.
    """
    st.title(" Face Emotion Detection Application")
    # FIX: local variable renamed from misspelled `activiteis`.
    activities = ["Home", "Webcam Face Detection", "By Images", "About"]
    choice = st.sidebar.selectbox("Select Activity", activities)
    if choice == "Home":
        html_temp_home1 = """<div style="background-color:#6D7B8D;padding:10px">
        <h3 style="color:yellow;text-align:center;"> Welcome to world of AI with Prince </h3>
        <h4 style="color:white;text-align:center;">
        Face Emotion detection application using OpenCV, Custom CNN model and Streamlit.</h4>
        </div>
        </br>"""
        st.markdown(html_temp_home1, unsafe_allow_html=True)
        # FIX: user-facing typo "recognization" -> "recognition".
        st.write("""
        Real time face emotion recognition just by one click.
        """)
    elif choice == "Webcam Face Detection":
        st.header("Webcam Live Feed")
        st.write("Click on start to use webcam and detect your face emotion")
        webrtc_streamer(key="example", mode=WebRtcMode.SENDRECV,
                        rtc_configuration=RTC_CONFIGURATION,
                        video_processor_factory=Faceemotion)
    elif choice == "By Images":
        st.header("Image Prediction App")
        uploaded_file = st.file_uploader("Choose an image...",
                                         type=["jpg", "jpeg", "png"])
        if uploaded_file is not None:
            image = np.array(Image.open(uploaded_file))
            prediction = generate_prediction(image)
            st.image(prediction, use_column_width=True)
    elif choice == "About":
        st.subheader("About this app")
        html_temp_about1 = """<div style="background-color:#6D7B8D;padding:10px">
        <h4 style="color:white;text-align:center;">
        Real time face emotion detection application using OpenCV, Custom Trained CNN model and Streamlit.</h4>
        </div>
        </br>"""
        st.markdown(html_temp_about1, unsafe_allow_html=True)
        html_temp4 = """
        <div style="background-color:#98AFC7;padding:10px">
        <h4 style="color:white;text-align:center;">Thanks for Visiting</h4>
        </div>
        <br></br>
        <br></br>"""
        st.markdown(html_temp4, unsafe_allow_html=True)
    else:
        # Unreachable with the current activity list; kept for safety.
        pass
| if __name__ == "__main__": | |
| main() | |