import base64
import os
from io import BytesIO

import cv2
import google.generativeai as genai
import numpy as np
import streamlit as st
from deepface import DeepFace
from dotenv import load_dotenv
from PIL import Image
print("DeepFace is installed and ready to use!")

print("Google Generative AI module is successfully imported!")

# Load environment variables (e.g. GOOGLE_API_KEY) from a local .env file.
load_dotenv()

# SECURITY: never hard-code API keys in source — the original embedded a live
# key here. Read it from the environment populated by load_dotenv() instead.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
| |
def get_gemini_response(input, model_name='gemini-pro'):
    """Send a text prompt to a Gemini model and return the raw response.

    Args:
        input: Prompt text to send. (Name shadows the builtin but is kept
            for backward compatibility with existing keyword callers.)
        model_name: Gemini model to use; defaults to 'gemini-pro', the
            value the original hard-coded.

    Returns:
        The ``generate_content`` response object, or ``None`` if the call
        failed (the error is surfaced in the Streamlit UI instead of raising).
    """
    try:
        model = genai.GenerativeModel(model_name)
        response = model.generate_content(input)
        return response
    except Exception as e:
        # Surface API/quota errors in the UI rather than crashing the app.
        st.error(f"Error: {e}")
        return None
|
|
| |
def detect_emotions(image):
    """Detect the dominant facial emotion in an image using DeepFace.

    Args:
        image: A PIL Image (or anything ``np.asarray`` can convert) —
            callers in this file pass PIL Images.

    Returns:
        Tuple ``(dominant_emotion, emotion_scores)`` where
        ``emotion_scores`` maps emotion name -> confidence, or
        ``(None, None)`` when detection fails (error shown in the UI).
    """
    try:
        # DeepFace.analyze expects a file path or numpy array, not a PIL
        # Image — convert first so uploaded images are analyzed correctly.
        frame = np.asarray(image)
        analysis = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
        return analysis[0]['dominant_emotion'], analysis[0]['emotion']
    except Exception as e:
        st.error(f"Error during emotion detection: {e}")
        return None, None
|
|
| |
def analyze_emotions_with_llm(emotion, emotions):
    """Ask the LLM for a well-being analysis of the detected emotions.

    Args:
        emotion: Name of the dominant emotion (key into ``emotions``).
        emotions: Dict of emotion name -> confidence score.

    Returns:
        The Gemini response object from ``get_gemini_response``, or ``None``
        on failure.
    """
    # Report the full emotion distribution (the original discarded everything
    # but the dominant score), flagging the dominant emotion for the model.
    emotion_analysis = "\n".join(
        f"{name}: {score:.2f}" + (" (dominant)" if name == emotion else "")
        for name, score in emotions.items()
    )

    analysis_prompt = f"""
    ### As a mental health and emotional well-being expert, analyze the following detected emotions.
    ### Detected Emotions:
    {emotion_analysis}
    ### Analysis Output:
    1. Identify any potential signs of depression based on the detected emotions.
    2. Explain the reasoning behind your identification.
    3. Provide recommendations for addressing any identified issues.
    """
    response = get_gemini_response(analysis_prompt)
    return response
|
|
| |
def capture_video_frame():
    """Grab a single frame from the default webcam.

    Returns:
        A PIL Image in RGB order, or ``None`` if the camera could not be
        opened or no frame was read (an error is shown in the Streamlit UI).
    """
    video_capture = cv2.VideoCapture(0)
    if not video_capture.isOpened():
        st.error("Failed to access the webcam. Ensure you have allowed camera access in your browser.")
        return None
    try:
        ret, frame = video_capture.read()
    finally:
        # Always release the camera, even if read() raises, so the device
        # isn't left locked for subsequent captures.
        video_capture.release()
    if ret:
        # OpenCV delivers BGR; convert to RGB for PIL/DeepFace.
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return Image.fromarray(frame_rgb)
    st.error("Failed to capture a frame from the webcam.")
    return None
|
|
| |
def display_response_content(response):
    """Render a Gemini response in the Streamlit UI, split into '###' sections."""
    st.subheader("Response Output")
    # Guard clause: nothing usable came back from the model.
    if not (response and response.candidates):
        st.write("No response received from the model or quota exceeded.")
        return
    candidate = response.candidates[0]
    text = candidate.content.parts[0].text if candidate.content.parts else ""
    for section in text.split('###'):
        if not section.strip():
            continue
        title, *body_lines = section.split('\n')
        title = title.strip()
        body = '\n'.join(ln.strip() for ln in body_lines if ln.strip())
        if title:
            st.markdown(f"**{title}**")
        if body:
            st.write(body)
|
|
| |
st.title("AI-Powered Depression and Emotion Detection System")
st.text("Use the AI system for detecting depression and emotions from images and live video.")


tab1, tab2 = st.tabs(["Image Analysis", "Live Video Analysis"])


with tab1:
    st.header("Image Analysis")
    uploaded_file = st.file_uploader("Upload an image for analysis", type=["jpg", "jpeg", "png"], help="Please upload an image file.")
    submit_image = st.button('Analyze Image')

    if submit_image:
        if uploaded_file is not None:
            image = Image.open(uploaded_file)
            emotion, emotions = detect_emotions(image)
            if emotion:
                response = analyze_emotions_with_llm(emotion, emotions)
                display_response_content(response)
            else:
                st.write("No emotions detected in the image.")
        else:
            # Original silently did nothing when the button was pressed with
            # no file selected — give the user explicit feedback instead.
            st.warning("Please upload an image before clicking 'Analyze Image'.")


with tab2:
    st.header("Live Video Analysis")
    capture_frame = st.button('Capture and Analyze Frame')

    if capture_frame:
        image = capture_video_frame()
        if image is not None:
            emotion, emotions = detect_emotions(image)
            if emotion:
                response = analyze_emotions_with_llm(emotion, emotions)
                display_response_content(response)
            else:
                st.write("No emotions detected in the video frame.")
        else:
            st.write("Failed to capture video frame.")
|
|