arpita-23 commited on
Commit
f0f0312
·
verified ·
1 Parent(s): a5d793c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +126 -0
app.py CHANGED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import google.generativeai as genai
3
+ import os
4
+ from PIL import Image
5
+ import cv2
6
+ from io import BytesIO
7
+ import base64
8
+ from dotenv import load_dotenv
9
+ import numpy as np
10
+ from deepface import DeepFace # Replacing FER with DeepFace
11
+ print("DeepFace is installed and ready to use!")
12
+
13
+
14
load_dotenv()

# SECURITY FIX: the original hard-coded a literal Gemini API key in source.
# Read it from the environment (populated by the .env file via load_dotenv)
# instead. The leaked key should be rotated immediately.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
17
+
18
# gemini function for general content generation
def get_gemini_response(input):
    """Send *input* to the Gemini Pro model and return the raw response.

    On any API failure the error is surfaced in the Streamlit UI and
    None is returned instead of raising.
    """
    # NOTE(review): the parameter name shadows the builtin `input`; kept
    # as-is to avoid breaking keyword callers.
    try:
        gemini = genai.GenerativeModel('gemini-pro')
        return gemini.generate_content(input)
    except Exception as e:
        st.error(f"Error: {e}")
        return None
27
+
28
# Function to analyze image for depression and emotion detection using DeepFace
def detect_emotions(image):
    """Run DeepFace emotion analysis on an image.

    Parameters
    ----------
    image : PIL.Image.Image or numpy.ndarray
        Picture to analyse (callers pass PIL Images).

    Returns
    -------
    tuple
        (dominant_emotion, emotion_scores_dict), or (None, None) when
        analysis fails (the error is shown in the Streamlit UI).
    """
    try:
        # BUGFIX: DeepFace.analyze documents a file path or a numpy array in
        # BGR channel order (as produced by cv2); older DeepFace versions
        # reject PIL Images outright. Convert PIL input to a BGR ndarray so
        # both code paths (upload and webcam) analyse correctly.
        if isinstance(image, Image.Image):
            image = np.asarray(image.convert("RGB"))[:, :, ::-1]
        analysis = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False)
        # analyze returns a list of per-face results; use the first face.
        return analysis[0]['dominant_emotion'], analysis[0]['emotion']
    except Exception as e:
        st.error(f"Error during emotion detection: {e}")
        return None, None
38
+
39
# Function to analyze detected emotions with LLM
def analyze_emotions_with_llm(emotion, emotions):
    """Ask the Gemini model to interpret the detected dominant emotion.

    *emotion* is the dominant emotion label; *emotions* maps each emotion
    label to its score. Returns whatever get_gemini_response returns
    (a response object, or None on failure).
    """
    emotion_analysis = f"{emotion}: {emotions[emotion]:.2f}"

    analysis_prompt = f"""
    ### As a mental health and emotional well-being expert, analyze the following detected emotions.
    ### Detected Emotions:
    {emotion_analysis}
    ### Analysis Output:
    1. Identify any potential signs of depression based on the detected emotions.
    2. Explain the reasoning behind your identification.
    3. Provide recommendations for addressing any identified issues.
    """
    return get_gemini_response(analysis_prompt)
54
+
55
# Function to capture live video frame for analysis
def capture_video_frame():
    """Grab one frame from the default webcam and return it as a PIL Image.

    Returns None (after showing a Streamlit error) when the camera cannot
    be opened or no frame can be read. The capture device is always
    released before returning a frame result.
    """
    cam = cv2.VideoCapture(0)
    if not cam.isOpened():
        st.error("Failed to access the webcam. Ensure you have allowed camera access in your browser.")
        return None
    ok, bgr_frame = cam.read()
    cam.release()
    if not ok:
        st.error("Failed to capture a frame from the webcam.")
        return None
    # cv2 delivers BGR; PIL expects RGB.
    return Image.fromarray(cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB))
69
+
70
# Function to parse and display response content
def display_response_content(response):
    """Render a Gemini response as titled sections split on '###'.

    Each '###'-delimited chunk becomes a bold title (its first line) plus
    a body (remaining non-blank lines). Shows a fallback message when the
    response is missing or has no candidates.
    """
    st.subheader("Response Output")
    if not (response and response.candidates):
        st.write("No response received from the model or quota exceeded.")
        return
    candidate = response.candidates[0]
    text = candidate.content.parts[0].text if candidate.content.parts else ""
    for chunk in text.split('###'):
        if not chunk.strip():
            continue
        title, _, rest = chunk.partition('\n')
        title = title.strip()
        body = '\n'.join(line.strip() for line in rest.split('\n') if line.strip())
        if title:
            st.markdown(f"**{title}**")
        if body:
            st.write(body)
87
+
88
## Streamlit App
st.title("AI-Powered Depression and Emotion Detection System")
st.text("Use the AI system for detecting depression and emotions from images and live video.")

# Tabs for different functionalities
tab1, tab2 = st.tabs(["Image Analysis", "Live Video Analysis"])

with tab1:
    st.header("Image Analysis")
    uploaded_file = st.file_uploader("Upload an image for analysis", type=["jpg", "jpeg", "png"], help="Please upload an image file.")
    submit_image = st.button('Analyze Image')

    if submit_image:
        if uploaded_file is None:
            # BUGFIX: clicking the button with no file was silently ignored;
            # tell the user what to do instead.
            st.warning("Please upload an image before clicking 'Analyze Image'.")
        else:
            image = Image.open(uploaded_file)
            emotion, emotions = detect_emotions(image)
            if emotion:
                response = analyze_emotions_with_llm(emotion, emotions)
                # Parse and display response in a structured way
                display_response_content(response)
            else:
                st.write("No emotions detected in the image.")

with tab2:
    st.header("Live Video Analysis")
    capture_frame = st.button('Capture and Analyze Frame')

    if capture_frame:
        image = capture_video_frame()
        if image is not None:
            emotion, emotions = detect_emotions(image)
            if emotion:
                response = analyze_emotions_with_llm(emotion, emotions)
                # Parse and display response in a structured way
                display_response_content(response)
            else:
                st.write("No emotions detected in the video frame.")
        else:
            st.write("Failed to capture video frame.")