# Krish30's picture
# Update app.py
# 0a12875 verified
import streamlit as st
import google.generativeai as genai
import os
from PIL import Image
import cv2
from io import BytesIO
import base64
from dotenv import load_dotenv
import numpy as np
from fer import FER
# Load environment variables from a local .env file (if present).
load_dotenv()

# SECURITY FIX: never commit API keys in source code — the original hard-coded
# the Gemini key here. Read it from the environment instead (set GOOGLE_API_KEY
# in .env or the shell); the leaked key should be revoked and rotated.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
# gemini function for general content generation
def get_gemini_response(input):
    """Send *input* to the Gemini Pro model and return the raw response.

    On any API failure the error is surfaced in the Streamlit UI via
    ``st.error`` and ``None`` is returned instead of raising.
    """
    try:
        gemini = genai.GenerativeModel('gemini-pro')
        return gemini.generate_content(input)
    except Exception as exc:
        st.error(f"Error: {exc}")
        return None
# Function to analyze image for depression and emotion detection using FER
def detect_emotions(image):
    """Run FER face/emotion detection on a PIL image.

    Returns the emotion-name -> score dict for the first detected face,
    or ``None`` when no face is found.
    """
    # FER operates on NumPy arrays, so convert the PIL image first.
    frame = np.array(image)
    detections = FER(mtcnn=True).detect_emotions(frame)
    if not detections:
        return None
    # Only the first detected face is analyzed.
    return detections[0]['emotions']
# Function to analyze detected emotions with LLM
def analyze_emotions_with_llm(emotions):
    """Ask Gemini to interpret a dict of FER emotion scores.

    *emotions* maps emotion names to confidence scores (0-1 floats).
    Returns the raw Gemini response object, or ``None`` on API failure.
    """
    scored = [f"{name}: {value:.2f}" for name, value in emotions.items()]
    emotion_analysis = ", ".join(scored)
    analysis_prompt = f"""
    ### As a mental health and emotional well-being expert, analyze the following detected emotions.
    ### Detected Emotions:
    {emotion_analysis}
    ### Analysis Output:
    1. Identify any potential signs of depression based on the detected emotions.
    2. Explain the reasoning behind your identification.
    3. Provide recommendations for addressing any identified issues.
    """
    return get_gemini_response(analysis_prompt)
# Function to capture live video frame for analysis
def capture_video_frame():
    """Grab a single frame from the default webcam (device 0).

    Returns the frame as an RGB PIL image, or ``None`` when the camera
    could not be read. The capture device is always released, even when
    the read fails.
    """
    video_capture = cv2.VideoCapture(0)
    try:
        ret, frame = video_capture.read()
    finally:
        # BUG FIX: the original never released the capture device, leaving
        # the webcam locked for subsequent captures / other applications.
        video_capture.release()
    if ret:
        # OpenCV delivers BGR channel order; PIL expects RGB.
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return Image.fromarray(frame_rgb)
    return None
# Function to parse and display response content
def display_response_content(response):
    """Render a Gemini response in the Streamlit UI, split into sections.

    Sections are delimited by '###' in the model output; each section's
    first non-empty line is rendered bold as a title, the remaining lines
    as body text. Writes a fallback message when the response is empty.
    """
    st.subheader("Response Output")
    if not (response and response.candidates):
        st.write("No response received from the model or quota exceeded.")
        return
    candidate = response.candidates[0]
    text = candidate.content.parts[0].text if candidate.content.parts else ""
    for section in text.split('###'):
        if not section.strip():
            continue
        lines = section.split('\n')
        title = lines[0].strip()
        body = '\n'.join(ln.strip() for ln in lines[1:] if ln.strip())
        if title:
            st.markdown(f"**{title}**")
        if body:
            st.write(body)
## Streamlit App
# Page chrome: title plus a one-line usage hint.
st.title("AI-Powered Depression and Emotion Detection System")
st.text("Use the AI system for detecting depression and emotions from images and live video.")
# Tabs for different functionalities
# tab1 = still-image upload analysis, tab2 = single webcam-frame analysis.
tab1, tab2 = st.tabs(["Image Analysis", "Live Video Analysis"])
with tab1:
    # Still-image workflow: upload -> FER detection -> LLM analysis -> render.
    st.header("Image Analysis")
    uploaded_file = st.file_uploader("Upload an image for analysis", type=["jpg", "jpeg", "png"], help="Please upload an image file.")
    submit_image = st.button('Analyze Image')
    if submit_image:
        if uploaded_file is not None:
            image = Image.open(uploaded_file)
            emotions = detect_emotions(image)
            if emotions:
                response = analyze_emotions_with_llm(emotions)
                # Parse and display response in a structured way
                display_response_content(response)
            else:
                st.write("No emotions detected in the image.")
        else:
            # BUG FIX: previously a button press with no file uploaded was a
            # silent no-op, leaving the user with no feedback at all.
            st.warning("Please upload an image before clicking Analyze.")
with tab2:
    # Webcam workflow: grab one frame -> FER detection -> LLM analysis -> render.
    st.header("Live Video Analysis")
    if st.button('Capture and Analyze Frame'):
        frame = capture_video_frame()
        if frame is None:
            st.write("Failed to capture video frame.")
        else:
            detected = detect_emotions(frame)
            if detected:
                # Parse and display response in a structured way
                display_response_content(analyze_emotions_with_llm(detected))
            else:
                st.write("No emotions detected in the video frame.")