ZunairaHawwar commited on
Commit
e772466
·
verified ·
1 Parent(s): c7c3226

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +199 -0
app.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import os, json, datetime
3
+ from langchain_community.vectorstores import FAISS
4
+ from langchain_community.embeddings import HuggingFaceEmbeddings
5
+ from langchain_groq import ChatGroq
6
+ from langchain.chains import LLMChain
7
+ from langchain.prompts import PromptTemplate
8
+ from gtts import gTTS
9
+ from pathlib import Path
10
+ from dotenv import load_dotenv
11
+ from sentence_transformers import SentenceTransformer, util
12
+ import altair as alt
13
+ import speech_recognition as sr
14
+ from transformers import pipeline
15
+ import torch
16
+
17
# Load environment variables
load_dotenv()
GROQ_API_KEY = os.getenv("GROQ_API_KEY")  # Groq API key; ChatGroq below needs this at startup

# Phrases that flag a possible mental-health crisis; matched case-insensitively
# by is_crisis() to show an emergency warning in the UI.
CRISIS_KEYWORDS = ["suicide", "kill myself", "end it all", "worthless", "can't go on", "hurt myself", "self harm", "want to disappear", "no reason to live"]
22
+
23
# Emotion detection
@st.cache_resource(show_spinner=False)
def _load_emotion_pipeline():
    """Load the emotion classifier once per session.

    The original code constructed the transformers pipeline inside
    detect_emotion(), re-loading the model weights on every call
    (i.e. on every button press) — caching it here removes that cost.
    """
    return pipeline(
        "text-classification",
        model="j-hartmann/emotion-english-distilroberta-base",
        top_k=1,
        device=-1,  # CPU; Spaces free tier has no GPU
    )


def detect_emotion(text):
    """Classify the dominant emotion in *text*.

    Returns:
        (label, score): lower-cased emotion label (e.g. "joy", "sadness")
        and the classifier's confidence in [0, 1].
    """
    # top_k=1 makes the pipeline return [[{label, score}]]; take the single best.
    prediction = _load_emotion_pipeline()(text)[0][0]
    return prediction['label'].lower(), prediction['score']
33
+
34
# Streamlit UI — page chrome shown at the top of the app.
st.set_page_config(page_title="DilBot - Emotional AI", page_icon="🧠")
st.title("🧠 DilBot - Emotional AI Companion")
st.markdown("A GenAI app that feels your words, finds comforting quotes, and replies with empathy.")
38
+
39
# Quote categories: built-in fallback quotes, used when the user does not
# upload their own .txt file of quotes below.
quote_categories = {
    "Grief": ["Grief is the price we pay for love.", "Tears are the silent language of grief."],
    "Motivation": ["Believe in yourself and all that you are.", "Tough times never last, but tough people do."],
    "Healing": ["Every wound has its own time to heal.", "It's okay to take your time to feel better."],
    "Relationships": ["The best relationships are built on trust.", "Love is not about possession but appreciation."]
}
# Sidebar-less input widgets: theme picker, optional custom quotes (one per
# line), and an optional voice message to transcribe instead of typing.
selected_category = st.selectbox(" Choose a quote theme:", list(quote_categories.keys()))
uploaded_quotes = st.file_uploader(" Upload your own quotes (.txt)", type=["txt"])
uploaded_audio = st.file_uploader("Upload a voice message (.wav)", type=["wav"])
49
+
50
# Vectorstore
@st.cache_resource(show_spinner=False)
def build_vectorstore(quotes):
    """Embed *quotes* with MiniLM and index them in an in-memory FAISS store.

    Cached per distinct quote list so the embedding model is not re-created
    on every Streamlit rerun.
    """
    embedder = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    store = FAISS.from_texts(quotes, embedding=embedder)
    return store
55
+
56
# Pick the active quote pool: the user's uploaded file wins over the
# built-in theme quotes. Either way, index the pool for retrieval.
if uploaded_quotes:
    current_quotes = uploaded_quotes.read().decode("utf-8").splitlines()
    vectorstore = build_vectorstore(current_quotes)
    st.success("Custom quotes uploaded.")
else:
    current_quotes = quote_categories[selected_category]
    vectorstore = build_vectorstore(current_quotes)
65
+
66
# Crisis check
def is_crisis(text, keywords=None):
    """Return True if *text* contains any crisis phrase (case-insensitive).

    Args:
        text: the user's message.
        keywords: optional iterable of phrases to scan for; defaults to the
            module-level CRISIS_KEYWORDS list (backward-compatible).
    """
    phrases = CRISIS_KEYWORDS if keywords is None else keywords
    lowered = text.lower()  # lower-case once instead of per phrase
    return any(phrase in lowered for phrase in phrases)
69
+
70
# Prompt setup: {context} is filled with retrieved quotes, {user_input}
# with the user's message; the "DilBot:" suffix cues the model to answer
# in character.
prompt_template = PromptTemplate(
    input_variables=["context", "user_input"],
    template="""
You are DilBot, an empathetic emotional support AI. Use the following emotional quote context to respond gently and supportively.

Context:
{context}

User:
{user_input}

DilBot:"""
)

# LLM — Groq-hosted Llama 3 70B; requires GROQ_API_KEY from the environment.
groq_llm = ChatGroq(api_key=GROQ_API_KEY, model="llama3-70b-8192")
87
+
88
def get_response(user_input):
    """Retrieve the two most relevant quotes and ask the LLM for an empathetic reply."""
    docs = vectorstore.similarity_search(user_input, k=2)
    quote_context = "\n".join(doc.page_content for doc in docs)
    chain = LLMChain(llm=groq_llm, prompt=prompt_template)
    return chain.run(context=quote_context, user_input=user_input)
93
+
94
# Save journal
def save_journal(user_input, emotion, score, response, path="journal.json"):
    """Append one conversation entry to the JSON journal at *path*.

    The journal is a JSON list of entry dicts. A corrupt or unreadable
    journal file is treated as empty instead of crashing the app
    (the original code let json.load raise on a truncated file).

    Args:
        user_input: the user's message.
        emotion: detected emotion label.
        score: classifier confidence in [0, 1]; stored as a percentage.
        response: DilBot's reply text.
        path: journal file location (default "journal.json").
    """
    entry = {
        "date": str(datetime.date.today()),
        "user_input": user_input,
        "emotion": emotion,
        "confidence": round(score * 100, 2),
        "response": response
    }
    journal = []
    if os.path.exists(path):
        try:
            with open(path, "r", encoding="utf-8") as f:
                journal = json.load(f)
        except (json.JSONDecodeError, OSError):
            journal = []  # corrupt/unreadable journal: start fresh rather than crash
        if not isinstance(journal, list):
            journal = []  # guard against a hand-edited non-list file
    journal.append(entry)
    with open(path, "w", encoding="utf-8") as f:
        json.dump(journal, f, indent=4)
110
+
111
# Speak response
def speak(text):
    """Synthesize *text* to response.mp3 via gTTS and embed an audio player."""
    mp3_path = Path("response.mp3")
    gTTS(text).save(mp3_path)
    st.audio(str(mp3_path), format="audio/mp3")
117
+
118
# Transcribe voice
def transcribe_audio_file(uploaded_audio):
    """Transcribe an uploaded .wav file with Google's free speech API.

    Returns the transcribed text, or an "Error: ..." string on any failure —
    the caller detects failure via the "Error:" prefix.
    """
    recognizer = sr.Recognizer()
    try:
        with sr.AudioFile(uploaded_audio) as source:
            audio_data = recognizer.record(source)
        return recognizer.recognize_google(audio_data)
    except Exception as exc:
        return f"Error: {str(exc)}"
128
+
129
# Store transcription across Streamlit reruns so the text survives the
# button click that produced it.
if "transcribed_text" not in st.session_state:
    st.session_state.transcribed_text = ""

# Transcribe on demand; errors are signalled by the "Error:" prefix
# convention used by transcribe_audio_file().
if uploaded_audio and st.button("Transcribe Voice"):
    transcribed = transcribe_audio_file(uploaded_audio)
    if transcribed.startswith("Error:"):
        st.error(transcribed)
    else:
        st.session_state.transcribed_text = transcribed
        st.success("Voice transcribed successfully!")
140
+
141
# Input field, pre-filled with any voice transcription.
user_input = st.text_area(
    "💬 What's on your mind?",
    value=st.session_state.transcribed_text,
    height=100
)

# Typed text wins; fall back to the stored transcription if the box is empty.
final_input = user_input.strip() or st.session_state.transcribed_text.strip()
149
+
150
# Single "Talk" Button: detect emotion, generate the reply, journal it,
# surface a matching quote, and speak the response.
@st.cache_resource(show_spinner=False)
def _load_sentence_model():
    """Load the sentence-similarity model once per session.

    The original code instantiated SentenceTransformer on every button
    press, re-loading the model weights each time.
    """
    return SentenceTransformer("all-MiniLM-L6-v2")


if st.button("🧠 Talk to DilBot"):
    if not final_input:
        st.warning("Please enter or upload something to share.")
    else:
        with st.spinner("DilBot is thinking..."):
            emotion, score = detect_emotion(final_input)
            response = get_response(final_input)
            save_journal(final_input, emotion, score, response)

        st.success(f"**Emotion Detected:** {emotion.capitalize()} ({round(score*100)}% confidence)")
        if is_crisis(final_input):
            st.error("Crisis phrase detected! Please reach out to a mental health professional.")

        # Pick the single quote most similar to the user's message.
        if current_quotes:
            model = _load_sentence_model()
            quote_embeddings = model.encode(current_quotes, convert_to_tensor=True)
            user_embedding = model.encode(final_input, convert_to_tensor=True)
            sims = util.pytorch_cos_sim(user_embedding, quote_embeddings)[0]
            best_match = sims.argmax().item()
            selected_quote = current_quotes[best_match]
            st.info(f"Quote for you: “{selected_quote}”")

        st.markdown(f"**DilBot Says:** {response}")
        speak(response)
175
+
176
# Mood tracker: chart emotion counts per day and list recent entries.
if os.path.exists("journal.json"):
    try:
        with open("journal.json", "r", encoding="utf-8") as f:
            journal_data = json.load(f)
    except (json.JSONDecodeError, OSError):
        journal_data = []  # unreadable journal: skip the tracker instead of crashing

    if journal_data:
        # alt.Data(values=...) expects a list of row records (one dict per
        # row), not a dict of column lists as the original code built.
        records = [
            {"date": entry["date"], "emotion": entry["emotion"]}
            for entry in journal_data
        ]

        st.markdown("**Daily Mood Tracker**")
        chart = alt.Chart(alt.Data(values=records)).mark_bar().encode(
            x='date:N',
            y='count():Q',
            color='emotion:N'
        )
        st.altair_chart(chart, use_container_width=True)

        st.markdown("**Recent Conversations**")
        # Show the last five entries, newest first.
        for item in reversed(journal_data[-5:]):
            st.markdown(f"`{item['date']}`\n- **You:** {item['user_input']}\n- **Emotion:** {item['emotion'].capitalize()} ({item['confidence']}%)\n- **DilBot:** {item['response']}")
197
+
198
# Footer credit (grammar fixed: "Built Ahmad Sana Farooq( Member...").
st.markdown("---")
st.caption("Built by Ahmad Sana Farooq (Member of CSG Hackathon Team)")