import streamlit as st
import numpy as np
import cv2
from tensorflow.keras.models import load_model
import mlflow
import subprocess
import os
import webbrowser
import matplotlib.pyplot as plt
import shap
from sklearn.model_selection import train_test_split
from codecarbon import EmissionsTracker
from mlflow import keras
import random
import streamlit_shadcn_ui as ui
from PIL import Image

# Start carbon tracking
tracker = EmissionsTracker()
tracker.start()
# Load and preprocess an image into the shape the model expects
def load_and_preprocess_image(image):
    # Convert to grayscale
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Resize to 48x48 (the input size the model was trained on)
    image = cv2.resize(image, (48, 48))
    # Normalize pixel values to [0, 1]
    image = image / 255.0
    # Expand dimensions to match the shape (1, 48, 48, 1)
    image = np.expand_dims(image, axis=0)
    image = np.expand_dims(image, axis=-1)
    return image
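
# A quick sanity check of the helper above -- a minimal sketch assuming any
# BGR image loaded with OpenCV; the file name is hypothetical:
#
#   img = cv2.imread('some_face.jpg')         # (H, W, 3) BGR array
#   batch = load_and_preprocess_image(img)    # -> shape (1, 48, 48, 1)
#   assert batch.shape == (1, 48, 48, 1)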
# Predict emotion from a preprocessed image batch
def predict_emotion(image, model):
    # Make prediction (one probability per class)
    prediction = model.predict(image)
    # Convert the prediction to an emotion label; the order must match the
    # class order used when the model was trained
    emotions = ['positive', 'negative', 'neutral']
    predicted_emotion = emotions[np.argmax(prediction)]
    return predicted_emotion
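
# Example of what the pieces above return -- a sketch assuming the model ends
# in a 3-way softmax layer (the numbers are hypothetical):
#
#   model.predict(batch)   # e.g. array([[0.71, 0.08, 0.21]])
#   np.argmax(...)         # -> 0, so predict_emotion returns 'positive'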
# Define paths to your music files
music_files = {
    'positive': [
        'happy_song1.mp3',
        'happy_song2.mp3',
        'happy_song3.mp3',
        # Add more positive songs as needed
    ],
    'negative': [
        'sad_song1.mp3',
        # Add more negative songs as needed
    ],
    'neutral': [
        'neutral_song1.mp3',
        'neutral_song2.mp3',
    ]
}
# Menu options
menu_options = [
    "Introduction",
    "The Model",
    "Dataset Visualization",
    "Graph Visualization",
    "Explainable AI",
    "MLFlow",
    "Prediction",
    "Code Carbon"
]
def open_mlflow_ui():
    # Start a local MLflow tracking server as a subprocess
    cmd = "mlflow ui --port 5000"
    subprocess.Popen(cmd, shell=True)

def open_browser(url):
    webbrowser.open_new_tab(url)
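
# These two helpers are not wired into the UI below (the MLFlow page links to
# a hosted DagsHub instance instead); example local usage would be:
#
#   open_mlflow_ui()
#   open_browser('http://localhost:5000')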
# Menu selection
selected_menu = st.sidebar.selectbox("Menu", menu_options)

# Display the corresponding content based on the selected menu option
if selected_menu == "Introduction":
    st.title("Introduction")
    st.subheader("Emotion-based Music Player")
    st.write("""
    This application detects a user's emotional state from a single photo.
    Once the emotion is detected, the app plays music that matches the mood deduced from the image.
    """)
    # List of features
    st.write("""
    ### Features
    - **Emotion Detection**: Utilizes deep learning models to interpret emotions from facial expressions.
    - **Music Selection**: Dynamically selects music based on the detected emotion, enhancing the user's listening experience.
    - **Interactive Interface**: A simple, user-friendly interface that is easy for anyone to use.
    """)
    st.write("""
    ### Business Idea: EmoSync
    EmoSync is a subscription-based service that leverages facial analysis to enhance emotional well-being by tailoring music to the user's current mood. The app draws on the principles of emotional congruence and catharsis, providing personalized musical therapy and mood tracking for those seeking emotional balance and mindfulness.
    When the facial analysis detects that a user is happy, the service curates playlists of upbeat, lively tracks to reinforce and prolong the positive mood.
    Conversely, if a user is sad, EmoSync opts for reflective and somber music. This approach might seem counterintuitive, but it validates the user's emotional state and facilitates a therapeutic release of emotions, known as emotional catharsis.
    Listening to music that resonates with their sadness allows users to process and gradually lighten their emotional load.
    As their mood begins to improve, EmoSync carefully transitions to more uplifting music, supporting a gentle and effective shift towards emotional positivity.
    This method ensures that users not only understand their emotional transitions but also find support and comfort through personalized musical therapy.
    """)
    # Image display; ensure the path or URL is correct
    image_url = "picture_final.png"  # Replace with your actual URL or file path
    st.image(image_url, caption='Visual representation of Emotion-based Music Interaction')

    # Optional: additional media
    # st.video('https://path_to_a_relevant_video.mp4')

    # # Interactive part: feedback or user input
    # st.write("### We value your feedback!")
    # feedback = st.text_area("Share your thoughts about this app:")
    # if st.button('Submit Feedback'):
    #     st.write("Thank you for your feedback!")
    #     # Here you might want to do something with the feedback, like storing it or processing it.
| elif selected_menu == "The Model": | |
| st.title("Neural Networks") | |
| st.write("Neural networks are computational models designed to mimic the workings of the human brain. They consist of interconnected nodes, called neurons, which work together to process information and learn from data.") | |
| st.write(" ") | |
| st.image("neural_net.png") | |
| st.subheader("Learning in Neural Networks") | |
| st.write("**Forward Pass:** The input data is passed through the network, layer by layer, producing an output.") | |
| st.write(" ") | |
| st.video("ezgif-6-7827342558.mp4") | |
| st.write("**Loss Function:** The network's output is compared to the actual label (for supervised learning), and a loss value is computed to quantify the difference.") | |
| st.write(" ") | |
| st.write("**Backward Pass (Backpropagation):** The loss is used to calculate the gradients, which indicate how much each weight should be adjusted to reduce the error.") | |
| st.write(" ") | |
| st.write("**Optimization:** An optimization algorithm, such as Stochastic Gradient Descent (SGD) or Adam, adjusts the weights based on the gradients, iteratively reducing the loss.") | |
| st.write(" ") | |
| st.subheader("Key Terms") | |
| st.write("**Convolution Operation:** A process in a CNN where a small grid moves over the image and simplifies it by focusing on the important details, like edges and colors, helping the network understand the image better.") | |
| st.write(" ") | |
| st.write("**Filter/Kernel:** A tiny grid used in CNNs that looks at small parts of the image to find specific features like lines or corners.") | |
| st.write(" ") | |
| st.write("**Feature Map:** The result you get after a filter scans the image. It shows what the filter noticed, like different textures or shapes in the picture.") | |
| st.write(" ") | |
| st.write("**Pooling:** A step used to make the image data smaller and easier to manage by simplifying the details but keeping the important parts. It helps the network process images faster.") | |
| st.write(" ") | |
| st.write("**Backpropagation:** A way the network learns from mistakes. It looks at errors it made in recognizing images and adjusts itself to do better next time.") | |
    image_path_1 = 'Screenshot 2024-05-03 at 08.01.06.png'
    image_1 = Image.open(image_path_1)
    st.image(image_1, caption='CNN Architecture', use_column_width=True)
    image_path_2 = 'a59c0bc87849dbc574cda62eee57a5e1baffd8f1.jpeg'
    image_2 = Image.open(image_path_2)
    st.image(image_2, caption='A filter in action', use_column_width=True)
| elif selected_menu == "Dataset Visualization": | |
| st.title("Dataset Visualization") | |
| st.write("This is the Dataset Visualization page. We used the CK+ Dataset, comprising seven different emotional states including anger, contempt, disgust, fear, happy, sadness and surprise.") | |
| # Define the base directory containing the subfolders | |
| dataset_folder = 'Folder' | |
| # Desired order for the subfolders | |
| ordered_folders = ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise'] | |
| # Filter and sort the actual subfolders to match the desired order | |
| sub_folders = [folder for folder in ordered_folders if folder in os.listdir(dataset_folder)] | |
| # Function to display images from each folder | |
| def display_images_from_folders(dataset_folder, sub_folders, num_images=10, images_per_row=5, image_size=(100, 100)): | |
| for sub_folder in sub_folders: | |
| st.title(sub_folder.title()) # Display folder name in title case | |
| folder_path = os.path.join(dataset_folder, sub_folder) | |
| if os.path.exists(folder_path): | |
| image_files = os.listdir(folder_path)[:num_images] # Get the first num_images files | |
| num_rows = (num_images + images_per_row - 1) // images_per_row # Calculate the number of rows needed | |
| for i in range(num_rows): | |
| row_images = image_files[i * images_per_row: (i + 1) * images_per_row] # Get images for this row | |
| col1, col2, col3, col4, col5 = st.columns(5) # Create 5 columns | |
| cols = [col1, col2, col3, col4, col5] | |
| for image_file, col in zip(row_images, cols): | |
| image_path = os.path.join(folder_path, image_file) | |
| image = cv2.imread(image_path) | |
| if image is not None: | |
| # Resize the image | |
| image = cv2.resize(image, image_size) | |
| # Display the image in the column | |
| col.image(image, caption=image_file, use_column_width=True) | |
| # Display images from each folder | |
| display_images_from_folders(dataset_folder, sub_folders) | |
| elif selected_menu == "Graph Visualization": | |
| st.title("Graph Visualization") | |
| st.write("This is the Graph Visualization page.") | |
| # Dropdown to select model | |
| selected_model = st.selectbox("Select Model", ["Model 1", "Model 2"]) | |
| # Load the selected model | |
| if selected_model == "Model 1": | |
| model_path = 'output/emotion_model.h5' | |
| history = np.load('output/saved_history_1.npz', allow_pickle=True) | |
| else: | |
| model_path = 'output/emotion_model_new.h5' | |
| history = np.load('output/saved_history_2.npz', allow_pickle=True) | |
| # Load the saved model and its training history | |
| model = load_model(model_path) | |
| # Extract the training and validation loss from the history | |
| train_loss = history['loss'] | |
| val_loss = history['val_loss'] | |
| # Extract the training and validation accuracy from the history | |
| train_accuracy = history['accuracy'] | |
| val_accuracy = history['val_accuracy'] | |
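    # Assumption about the .npz files: they were presumably written from a
    # Keras History object after training, e.g.
    #   np.savez('output/saved_history_1.npz', **history.history)
    # which is why the arrays can be read back by key, as above.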
    st.title('Training and Validation Loss over Epochs')
    st.write("Loss is a measure of how 'wrong' the model's predictions are compared to the actual values. The training loss shows how well the model is performing on the training data after each full pass over it (called an epoch).")
    st.write(" ")
    st.write("Monitoring loss helps us understand whether the model is improving over time.")
    st.write(" ")
    st.write("We want both training and validation loss to decrease over epochs, indicating that the model is learning.")

    # Plot the loss curves
    fig1, ax = plt.subplots()
    epochs = range(1, len(train_loss) + 1)
    ax.plot(epochs, train_loss, 'bo', label='Training loss')
    ax.plot(epochs, val_loss, 'r', label='Validation loss')
    ax.set_title('Training and Validation Loss')
    ax.set_xlabel('Epochs')
    ax.set_ylabel('Loss')
    ax.legend()
    # Display the plot using Streamlit
    st.pyplot(fig1)
    st.title('Training and Validation Accuracy over Epochs')
    st.write("Training and validation accuracy measure how well the model performs on the training data and on new, unseen data (the validation set) over time (epochs).")
    st.write(" ")
    st.write("Monitoring accuracy helps us understand how well the model is learning and generalizing from the data. We want both training and validation accuracy to increase over epochs, indicating that the model is improving its predictive ability.")

    # Plot the accuracy curves
    fig2, ax = plt.subplots()
    epochs = range(1, len(train_accuracy) + 1)
    ax.plot(epochs, train_accuracy, 'bo', label='Training accuracy')
    ax.plot(epochs, val_accuracy, 'r', label='Validation accuracy')
    ax.set_title('Training and Validation Accuracy')
    ax.set_xlabel('Epochs')
    ax.set_ylabel('Accuracy')
    ax.legend()
    # Display the plot using Streamlit
    st.pyplot(fig2)
| elif selected_menu == "Explainable AI": | |
| st.title("Explainable AI") | |
| st.write("This is the Explainable AI page.") | |
| st.write(" ") | |
| st.write("Brighter areas in the SHAP image indicate pixels that strongly influenced the model's decision about the person's emotion. For instance, if the model predicts happiness and you see a bright spot around the corners of the mouth, it suggests that those pixels strongly contribute to the model's confidence in its prediction.") | |
| st.write(" ") | |
| st.write("On the other hand, darker areas represent pixels that had less impact on the model's decision. If there are dark regions in the background of the image, they might not have influenced the model's prediction much about the person's emotion. ") | |
| st.write(" ") | |
| st.write("The use of red and blue colorings helps distinguish between positive and negative SHAP values. Positive values indicate that the corresponding pixels contribute to the model's prediction in favor of a certain class or outcome (e.g., happiness in emotion recognition), while negative values suggest contributions against that prediction.") | |
| st.write(" ") | |
| st.write("When interpreting the SHAP image, brighter red areas suggest pixels that strongly support the model's prediction, while brighter blue areas highlight pixels that strongly oppose the prediction.") | |
| # Dropdown to select model | |
| selected_model = st.selectbox("Select Model", ["Model 1", "Model 2"]) | |
| # Load the selected model | |
| if selected_model == "Model 1": | |
| model_path = 'output/emotion_model.h5' | |
| else: | |
| model_path = 'output/emotion_model_new.h5' | |
| # Load the saved model and its training history | |
| model = load_model(model_path) | |
| # Define the Streamlit app | |
| st.title('SHAP Explanation for Image Classification') | |
| st.write(" ") | |
| st.write(" ") | |
| st.image("Screenshot 2024-05-03 at 3.41.00 PM.png") | |
| elif selected_menu == "MLFlow": | |
| st.title("MLFlow") | |
| st.write("This is the MLFlow page. Here you can access MLFlow logs.") | |
| link = "https://dagshub.com/rohan-26/Emotion-Model.mlflow/#/compare-experiments/s?experiments=%5B%220%22%2C%221%22%5D&searchFilter=&orderByKey=attributes.start_time&orderByAsc=false&startTime=ALL&lifecycleFilter=Active&modelVersionFilter=All+Runs&datasetsFilter=W10%3D" | |
| ui.link_button(text="Link to the MLFlow page", url=link, key="link_btnmlflow") | |
| elif selected_menu == "Prediction": | |
| st.title("Prediction") | |
| # Dropdown to select model | |
| selected_model = st.selectbox("Select Model", ["Model 1", "Model 2"]) # Add more models as needed | |
| experiment = mlflow.set_experiment("Emotion Detection for Music") | |
| with mlflow.start_run(experiment_id=experiment.experiment_id, nested=True): | |
| # Load the selected model | |
| if selected_model == "Model 1": | |
| model_path = 'output/emotion_model.h5' | |
| history = np.load('output/saved_history_1.npz', allow_pickle=True) | |
| else: | |
| model_path = 'output/emotion_model_new1234.h5' | |
| history = np.load('output/saved_history_2.npz', allow_pickle=True) | |
| model = load_model(model_path) | |
| # Extract the training and validation loss from the history | |
| train_loss = history['loss'] | |
| val_loss = history['val_loss'] | |
| # Log selected model to MLFlow | |
| mlflow.log_param("selected_model", selected_model) | |
| mlflow.sklearn.log_model(model, "model") | |
| #Calculate the mean of the train_loss array | |
| mean_train_loss = np.mean(train_loss) | |
| # Log the mean_train_loss as a metric | |
| mlflow.log_metric("Mean Training Loss", mean_train_loss) | |
| # Calculate the mean of the train_loss array | |
| mean_validation_loss = np.mean(val_loss) | |
| # Log the mean_train_loss as a metric | |
| mlflow.log_metric("Mean Validation Loss", mean_validation_loss) | |
| uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png"]) | |
| if uploaded_file is not None: | |
| # Convert the file to an OpenCV image | |
| file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8) | |
| opencv_image = cv2.imdecode(file_bytes, 1) | |
| if st.button('Analyze Emotion'): | |
| processed_image = load_and_preprocess_image(opencv_image) | |
| emotion = predict_emotion(processed_image, model) | |
| if emotion == 'positive': | |
| # Display the emotion with a happy emoji | |
| st.write('Predicted Emotion: {} π'.format(emotion)) | |
| elif emotion == 'negative': | |
| # Display the emotion with a sad emoji | |
| st.write('Predicted Emotion: {} π’'.format(emotion)) | |
| elif emotion == 'neutral': | |
| # Display the emotion with a neutral emoji | |
| st.write('Predicted Emotion: {} π'.format(emotion)) | |
| # Play music based on the emotion | |
| audio_path = random.choice(music_files[emotion]) | |
| # audio_path = music_files[emotion] | |
| audio_file = open(audio_path, 'rb') | |
| audio_bytes = audio_file.read() | |
| st.audio(audio_bytes, format='audio/mp3') | |
| mlflow.keras.log_model(model, "CNN_model") | |
| elif selected_menu == "Code Carbon": | |
| emissions = tracker.stop() | |
| st.title("Carbon Tracking Results") | |
| st.write(f"Estimated emissions for training this model: 0.0048 kg of CO2") | |