Spaces:
Sleeping
Sleeping
File size: 4,306 Bytes
2056aa0 0a12875 2056aa0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 |
import streamlit as st
import google.generativeai as genai
import os
from PIL import Image
import cv2
from io import BytesIO
import base64
from dotenv import load_dotenv
import numpy as np
from fer import FER
# Load environment variables from a local .env file (API keys, etc.).
load_dotenv()
# SECURITY FIX: the Gemini API key was hard-coded in source (and therefore
# leaked with the repository). Read it from the environment instead —
# set GOOGLE_API_KEY in .env or the shell. The old key should be revoked.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
# Gemini helper for general content generation.
def get_gemini_response(prompt):
    """Send *prompt* to the Gemini text model and return the raw response.

    Returns None (after surfacing the error in the Streamlit UI) when the
    API call fails for any reason (quota, network, invalid key, ...).
    """
    # NOTE: parameter renamed from `input`, which shadowed the builtin.
    try:
        model = genai.GenerativeModel('gemini-pro')
        return model.generate_content(prompt)
    except Exception as e:
        # Broad catch is deliberate: this is the UI boundary, so any SDK
        # failure is reported to the user instead of crashing the app.
        st.error(f"Error: {e}")
        return None
# Function to analyze an image for emotion detection using FER.
def detect_emotions(image):
    """Run FER face/emotion detection on a PIL image.

    Returns the emotion->score dict for the first detected face,
    or None when no face is found.
    """
    # PERF FIX: FER(mtcnn=True) loads the MTCNN face-detection model, which
    # is expensive; cache a single detector on the function instead of
    # rebuilding it on every call.
    detector = getattr(detect_emotions, "_detector", None)
    if detector is None:
        detector = FER(mtcnn=True)
        detect_emotions._detector = detector
    # FER expects a NumPy array, not a PIL Image.
    faces = detector.detect_emotions(np.array(image))
    if faces:
        return faces[0]['emotions']
    return None
# Function to have the LLM interpret the detected emotion scores.
def analyze_emotions_with_llm(emotions):
    """Build an analysis prompt from *emotions* (name -> score dict) and
    return the Gemini response for it."""
    scores = ", ".join(f"{name}: {value:.2f}" for name, value in emotions.items())
    prompt = f"""
### As a mental health and emotional well-being expert, analyze the following detected emotions.
### Detected Emotions:
{scores}
### Analysis Output:
1. Identify any potential signs of depression based on the detected emotions.
2. Explain the reasoning behind your identification.
3. Provide recommendations for addressing any identified issues.
"""
    return get_gemini_response(prompt)
# Function to capture one live video frame for analysis.
def capture_video_frame():
    """Grab a single frame from the default webcam (device 0).

    Returns a PIL Image in RGB on success, or None if the camera could
    not be opened or read.
    """
    video_capture = cv2.VideoCapture(0)
    try:
        ret, frame = video_capture.read()
    finally:
        # BUG FIX: the capture device was never released, leaking the
        # camera handle on every call.
        video_capture.release()
    if ret:
        # OpenCV delivers BGR; convert to RGB before wrapping in PIL.
        return Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    return None
# Function to parse a Gemini response and render it in the UI.
def display_response_content(response):
    """Render *response* in Streamlit, splitting its text on '###' section
    markers into bolded titles with their body text underneath."""
    st.subheader("Response Output")
    # Guard clause: nothing usable came back from the model.
    if not (response and response.candidates):
        st.write("No response received from the model or quota exceeded.")
        return
    candidate = response.candidates[0]
    text = candidate.content.parts[0].text if candidate.content.parts else ""
    for section in text.split('###'):
        if not section.strip():
            continue
        title, *body_lines = section.split('\n')
        title = title.strip()
        body = '\n'.join(ln.strip() for ln in body_lines if ln.strip())
        if title:
            st.markdown(f"**{title}**")
        if body:
            st.write(body)
## Streamlit App
# Page header shown on every run of the script.
st.title("AI-Powered Depression and Emotion Detection System")
st.text("Use the AI system for detecting depression and emotions from images and live video.")
# Tabs for different functionalities
tab1, tab2 = st.tabs(["Image Analysis", "Live Video Analysis"])
with tab1:
    # Static-image pipeline: upload -> FER detection -> LLM analysis -> render.
    st.header("Image Analysis")
    uploaded_file = st.file_uploader("Upload an image for analysis", type=["jpg", "jpeg", "png"], help="Please upload an image file.")
    submit_image = st.button('Analyze Image')
    if submit_image:
        if uploaded_file is not None:
            image = Image.open(uploaded_file)
            emotions = detect_emotions(image)
            if emotions:
                response = analyze_emotions_with_llm(emotions)
                # Parse and display response in a structured way
                display_response_content(response)
            else:
                st.write("No emotions detected in the image.")
with tab2:
    # Live pipeline: grab one webcam frame -> FER detection -> LLM analysis -> render.
    st.header("Live Video Analysis")
    capture_frame = st.button('Capture and Analyze Frame')
    if capture_frame:
        image = capture_video_frame()
        if image is not None:
            emotions = detect_emotions(image)
            if emotions:
                response = analyze_emotions_with_llm(emotions)
                # Parse and display response in a structured way
                display_response_content(response)
            else:
                st.write("No emotions detected in the video frame.")
        else:
            # capture_video_frame returns None when the webcam read fails.
            st.write("Failed to capture video frame.")
|