import os
import pickle

import faiss
import gradio as gr
import pandas as pd
import torch
from datasets import Dataset
from huggingface_hub import HfApi, create_repo, hf_hub_download
from sentence_transformers import SentenceTransformer, util
from transformers import AutoModelForCausalLM, AutoTokenizer

# Disable the optional hf_transfer download accelerator so Hub downloads work without the extra package
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "0"

# πŸ”Ή Check for bitsandbytes (optional; enables memory-efficient quantized model loading)
try:
    import bitsandbytes as bnb  # noqa: F401
    if torch.cuda.is_available():
        print("bitsandbytes available with GPU support.")
    else:
        print("bitsandbytes available (CPU-only build).")
except ImportError:
    print("bitsandbytes not installed; continuing without quantization support.")

# πŸ”Ή Load Hugging Face Model Repo Details
MODEL_NAME = "AvocadoMuffin/Gemma_Fine_Tuned_Model"
HF_TOKEN = os.getenv("HF_TOKEN")  # Ensure this is set in your environment

# πŸ”Ή Load Tokenizer & Model
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if torch.cuda.is_available() else torch.float32

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=dtype, token=HF_TOKEN).to(device)

tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as the pad token in case the tokenizer does not define one

# πŸ”Ή Load FAISS Index & Knowledge from HF Model Repo
faiss_index_path = hf_hub_download(repo_id=MODEL_NAME, filename="faiss_index/index.faiss", token=HF_TOKEN)
knowledge_path = hf_hub_download(repo_id=MODEL_NAME, filename="faiss_index/knowledge.pkl", token=HF_TOKEN)

# Load FAISS Index
index = faiss.read_index(faiss_index_path)

# Load Knowledge.pkl (Contains text data + embeddings)
with open(knowledge_path, "rb") as f:
    knowledge_data = pickle.load(f)

# Validate knowledge data
if isinstance(knowledge_data, dict) and "questions" in knowledge_data and "answers" in knowledge_data:
    questions = knowledge_data["questions"]
    answers = knowledge_data["answers"]
else:
    raise ValueError("❌ knowledge.pkl does not contain expected questions and answers keys!")

# πŸ”Ή Load Sentence Transformer Embedding Model
embedding_model = SentenceTransformer("multi-qa-mpnet-base-dot-v1")

print("βœ… FAISS Index, Knowledge Data, and Embedding Model Loaded Successfully!")

# πŸ”Ή Function: Retrieve Relevant Context from FAISS
def retrieve_relevant_context(query, index, answers, embedding_model, top_k=3, similarity_threshold=0.8):
    """Retrieve top-k relevant knowledge snippets from FAISS index."""
    
    query_embedding = embedding_model.encode([query], convert_to_numpy=True)
    distances, indices = index.search(query_embedding, top_k)

    retrieved_contexts = []
    seen_embeddings = []

    for idx in indices[0]:
        if 0 <= idx < len(answers):
            answer = answers[idx]
            answer_embedding = embedding_model.encode([answer], convert_to_numpy=True)

            # Keep this snippet only if it is not a near-duplicate of something already retrieved
            # (cosine similarity below the threshold against every previously kept answer).
            if all(util.cos_sim(answer_embedding, emb) < similarity_threshold for emb in seen_embeddings):
                retrieved_contexts.append(answer)
                seen_embeddings.append(answer_embedding)

    return "\n".join(retrieved_contexts[:top_k])

# Define system messages for different modes
SYSTEM_MESSAGES = {
    "general": "You are a helpful AI assistant for everyday tasks.",
    "mental_health": "You are a helpful AI assistant for mental health."
}

# Mental health keywords to check if query is related to mental health
MENTAL_HEALTH_KEYWORDS = [
    "anxiety", "depression", "stress", "therapy", "counseling", "mental health", "panic", "trauma", 
    "mindfulness", "meditation", "psychological", "self-care", "emotion", "feeling", "mood", 
    "psychiatry", "therapist", "psychologist", "burnout", "coping", "ptsd", "ocd", "adhd", "bipolar", 
    "schizophrenia", "grief", "insomnia", "wellbeing", "mental wellbeing", "mental wellness", 
    "emotional wellbeing", "emotional wellness", "self-help", "relaxation", "breathing exercise",
    "cognitive behavioral", "cbt", "sad", "angry", "worried", "anxious", "depressed", "overwhelmed",
    "psychology", "mental disorder", "suicide", "suicidal", "crisis", "loneliness", "lonely", "isolated",
    "phobia", "trauma", "addiction", "substance abuse", "eating disorder", "self-esteem", "confidence"
]

# Function to check if a query is related to mental health
def is_mental_health_query(query):
    query_lower = query.lower()
    return any(keyword in query_lower for keyword in MENTAL_HEALTH_KEYWORDS)
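# Example (illustrative only): is_mental_health_query("I've been feeling anxious lately") -> True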

# πŸ”Ή Function: Generate Response Using RAG 
def generate_rag_response(query, model, tokenizer, index, answers, embedding_model, chat_history, system_message, max_new_tokens=200):
    """Generate response using retrieved context, chat history, and system message."""

    # Check if query is mental health related when in mental health mode
    if system_message == SYSTEM_MESSAGES["mental_health"] and not is_mental_health_query(query):
        return "I'm currently in mental health assistant mode and can only answer questions related to mental health, psychology, emotions, and wellbeing. If you'd like to discuss other topics, please change the system message to general assistant mode."
    
    query_lower = query.strip().lower()

    # Define follow-up triggers  
    follow_up_triggers = ["continue", "go on", "tell me more", "what else?", "elaborate", "explain more"]
    is_follow_up = any(trigger in query_lower for trigger in follow_up_triggers)

    # Retrieve relevant knowledge  
    retrieved_context = retrieve_relevant_context(query, index, answers, embedding_model).strip()

    # Use only the last 5 messages of the chat history as conversational context
    chat_context = "\n".join([msg["content"] for msg in chat_history[-5:]])

    # Handle follow-up requests by continuing from the most recent assistant reply.
    # Note: the last entry in chat_history is the current user query, so search backwards for the assistant turn.
    if is_follow_up and chat_history:
        last_ai_response = next((msg for msg in reversed(chat_history) if msg["role"] == "assistant"), None)
        if last_ai_response:
            query = f"{last_ai_response['content']} {query}"  # Prepend the previous answer so the follow-up flows on

    # Few-Shot Examples for Mental Health mode only
    few_shot_examples = """
User: What are some effective meditation techniques for beginners?
AI: Based on established research, here are some effective meditation techniques for beginners:

1. Mindful Breathing - Focus on your breath, noticing the sensation of air moving in and out of your body.
2. Body Scan Meditation - Progressively relax different parts of your body from head to toe.
3. Loving-Kindness Meditation - Direct positive wishes toward yourself and others.
4. Guided Meditation - Follow along with recorded instructions to help maintain focus.
5. Walking Meditation - Practice mindfulness while walking slowly and deliberately.

I recommend starting with just 5 minutes daily and gradually increasing duration. Would you like more details about any specific technique, or would you prefer suggestions for apps that could help you get started?

User: Tell me more about body scan meditation.
AI: Body scan meditation is a mindfulness practice where you systematically focus your attention on different parts of your body, from your toes to the top of your head.

Here's how to practice it:
1. Lie down in a comfortable position or sit in a chair with your feet on the ground
2. Close your eyes and take several deep breaths
3. Begin by bringing awareness to your toes, noticing any sensations (tingling, temperature, tension)
4. Gradually move your attention upward (feet, ankles, calves, etc.)
5. For each body part, notice sensations without judgment, then consciously relax that area
6. If you notice pain or discomfort, acknowledge it with compassion before moving on
7. Continue until you've scanned your entire body

Regular practice can help reduce stress, improve body awareness, and release physical tension. Many find it helpful for sleep issues as well.

Would you like guidance on how long to practice or how to handle common challenges like mind-wandering during body scans?
"""

    # Build prompt depending on system message
    if system_message == SYSTEM_MESSAGES["mental_health"]:
        prompt = f"""You are a compassionate mental wellness assistant powered by a retrieval-augmented generation (RAG) system.
Maintain a warm, supportive conversation while providing evidence-based mental health information.

SYSTEM MESSAGE: {system_message}

When responding to mental health queries:
- Provide evidence-based information from reputable sources
- Keep the tone supportive, non-judgmental, and empathetic
- If uncertain about mental health information, acknowledge limitations and avoid potentially harmful advice
- For crisis situations, recommend professional help rather than providing advice
- Frame mental wellness information in a way that emphasizes self-compassion and gradual progress

If the user expresses overwhelm, anxiety, or difficulty with their mental health, offer evidence-based coping strategies and gentle support.

{few_shot_examples if len(chat_history) > 0 else ""}
"""
    else:
        prompt = f"""You are a helpful AI assistant designed to answer questions on a wide range of topics.
Provide accurate, concise information while maintaining a conversational tone.

SYSTEM MESSAGE: {system_message}

When responding to queries:
- Provide accurate information on the requested topic
- Keep explanations clear and easy to understand
- Be conversational and friendly
- If you don't know something, acknowledge limitations rather than making up information
"""

    # Only append the relevant context if available
    if retrieved_context:
        prompt += f"\nπŸ“Œ Relevant Information Retrieved:\n{retrieved_context}\n\n"

    prompt += f"πŸ’¬ Chat History:\n{chat_context}\nπŸ‘€ User: {query}\nπŸ€– AI:"

    # Tokenize input (keep the attention mask so generate() does not have to infer it)
    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    # Generate AI response
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.eos_token_id,  # explicit pad id avoids a generation warning
        )

    response = tokenizer.decode(output_ids[0], skip_special_tokens=True).strip()

    # Prevent prompt repetition in response  
    if response.startswith(prompt):
        response = response[len(prompt):].strip()

    return response
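# Example (illustrative only): a one-off generation outside the Gradio UI.
# print(generate_rag_response("How can I cope with exam stress?", model, tokenizer, index, answers,
#                             embedding_model, chat_history=[], system_message=SYSTEM_MESSAGES["general"]))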


# πŸ”Ή Function to save feedback for RLHF (all required imports are consolidated at the top of the file)

def save_feedback(query, response, feedback, comment=""):
    feedback_file = "chat_feedback.csv"
    repo_name = "AvocadoMuffin/Chat_Feedback_Data"
    
    # Create new data from current feedback
    new_data = pd.DataFrame([[query, response, feedback, comment]], 
                          columns=["Query", "Response_Generated", "Feedback", "Comment"])
    
    try:
        # First check if we have a local copy of the data
        if os.path.exists(feedback_file):
            existing_data = pd.read_csv(feedback_file)
            print(f"Loaded existing data from local file with {len(existing_data)} records")
        else:
            # Try to download the dataset from HF Hub
            api = HfApi()
            try:
                # List all files in the repository to find our data
                files = api.list_repo_files(repo_id=repo_name, repo_type="dataset")
                
                # Look for parquet files which contain our data
                data_files = [f for f in files if f.endswith('.parquet') or f.endswith('.csv')]
                
                if data_files:
                    # Download the first data file we find (use the module-level hf_hub_download helper)
                    file_path = hf_hub_download(
                        repo_id=repo_name,
                        filename=data_files[0],
                        repo_type="dataset"
                    )
                    
                    # Load the data based on file extension
                    if file_path.endswith('.parquet'):
                        existing_data = pd.read_parquet(file_path)
                    else:
                        existing_data = pd.read_csv(file_path)
                        
                    print(f"Successfully loaded data from Hub with {len(existing_data)} records")
                else:
                    print("No data files found in repository")
                    existing_data = pd.DataFrame(columns=["Query", "Response_Generated", "Feedback", "Comment"])
            except Exception as e:
                print(f"Error accessing repository: {e}")
                existing_data = pd.DataFrame(columns=["Query", "Response_Generated", "Feedback", "Comment"])
        
        # Append new feedback to the existing data
        updated_data = pd.concat([existing_data, new_data], ignore_index=True)
        print(f"Updated dataset now has {len(updated_data)} records")
        
        # Always save locally for backup and future reference
        updated_data.to_csv(feedback_file, index=False)
        print(f"Saved updated data to local file with {len(updated_data)} records")
        
        # Convert to Hugging Face Dataset
        dataset = Dataset.from_pandas(updated_data)
        
        # Create repo if needed and push to Hugging Face Hub
        create_repo(repo_id=repo_name, repo_type="dataset", exist_ok=True)
        dataset.push_to_hub(repo_name)
        print("Feedback saved and dataset updated on Hugging Face!")
        
    except Exception as e:
        print(f"Error updating dataset: {e}")
        # Still save locally as backup
        if os.path.exists(feedback_file):
            local_data = pd.read_csv(feedback_file)
            pd.concat([local_data, new_data], ignore_index=True).to_csv(feedback_file, index=False)
        else:
            new_data.to_csv(feedback_file, index=False)
        print("Saved locally only due to error")
    
    return "Feedback saved and dataset updated!"


# πŸ”Ή The chatbot function (with RAG and conversation history tracking)
def chatbot(query, chat_history=None, system_message=SYSTEM_MESSAGES["general"]):
    if chat_history is None:
        chat_history = []  # Initialize chat_history as an empty list
    
    # Append user message to the chat history
    chat_history.append({"role": "user", "content": query})
    
    # Generate response using the RAG model with specified system message
    response = generate_rag_response(query, model, tokenizer, index, answers, embedding_model, chat_history, system_message)
    
    # Append the assistant's response to the chat history for context
    chat_history.append({"role": "assistant", "content": response})
    
    # Format chat history with HTML and CSS for better visual appeal
    formatted_chat_history = ""
    for msg in chat_history:
        if msg["role"] == "user":
            formatted_chat_history += f'<p style="color: #4A90E2; font-weight: bold;">User:</p><p style="background-color: #E7F1F8; padding: 10px; border-radius: 5px; font-family: Arial, sans-serif; font-size: 14px;">{msg["content"]}</p>'
        else:
            formatted_chat_history += f'<p style="color: #1D4ED8; font-weight: bold;">AI:</p><p style="background-color: #D1F4D9; padding: 10px; border-radius: 5px; font-family: Arial, sans-serif; font-size: 14px;">{msg["content"]}</p>'
    
    # Return the formatted chat history as HTML and the updated chat history
    # Make sure to reset the feedback radio button value to None when showing new feedback options
    return gr.HTML(formatted_chat_history), chat_history, gr.update(visible=True, value=None), gr.update(visible=True), gr.update(visible=True, value=""), response, gr.update(value=""), query
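# The tuple above maps, in order, to: chat_history_box, chat_history_state, feedback,
# submit_feedback_button, feedback_comments, latest_response, query (cleared), latest_query,
# matching the `outputs=` lists wired up in create_gradio_interface below.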

# Function to handle feedback submission
def handle_feedback(query, latest_response, feedback, comment):
    # Store feedback as None if not selected
    if feedback:  # Only save if feedback has a value
        feedback_value = 1 if feedback == "πŸ‘ Like" else 0
        save_feedback(query, latest_response, feedback_value, comment)  # Save feedback data
    else:
        # If no feedback is selected, save as NULL or handle accordingly
        save_feedback(query, latest_response, None, comment)
        
    # Hide feedback UI after submission and clear both the comment field and feedback selection
    return gr.update(visible=False, value=None), gr.update(visible=False), gr.update(visible=False, value="")

# Function to handle system message change and reset chat
def change_system_message(message_key, chat_history_state):
    # Get the actual system message
    selected_message = SYSTEM_MESSAGES[message_key]
    
    # Reset chat history when mode changes
    new_chat_history = []
    
    # Create welcome message based on selected mode
    if message_key == "mental_health":
        welcome_message = "I'm now in mental health assistant mode. I can help with questions about mental health, wellness, emotions, and psychological well-being. How can I support you today?"
    else:
        welcome_message = "I'm here to help with a wide range of questions and tasks. What would you like assistance with today?"
    
    # Add welcome message to chat history
    new_chat_history.append({"role": "assistant", "content": welcome_message})
    
    # Format the welcome message for display
    formatted_chat_history = f'<p style="color: #1D4ED8; font-weight: bold;">AI:</p><p style="background-color: #D1F4D9; padding: 10px; border-radius: 5px; font-family: Arial, sans-serif; font-size: 14px;">{welcome_message}</p>'
    
    return gr.HTML(formatted_chat_history), new_chat_history, selected_message

def create_gradio_interface():
    with gr.Blocks() as demo:
        gr.Markdown("## **🧠 Mindful Bot: AI for Everything**")
        gr.Markdown("An AI chatbot designed to offer empathetic support for mental wellness using RAG-based techniques, capable of handling adversarial queries with care. It also provides insightful responses to general queries, separating wellness-related conversations for more focused, compassionate assistance.")

        latest_query = gr.State("")
        latest_response = gr.State("")
        chat_history_state = gr.State([])
        current_system_message = gr.State(SYSTEM_MESSAGES["general"])
        
        with gr.Row():
            with gr.Column(scale=7):  # Chat area
                # Use gr.HTML to display the formatted chat history
                chat_history_box = gr.HTML(label="Chat History", elem_id="chat-history")  
                    
                with gr.Row():
                    with gr.Column(scale=8):
                        query = gr.Textbox(label="Enter your query", elem_id="user-input", lines=3)
                    with gr.Column(scale=2):
                        submit_button = gr.Button("Submit", elem_id="submit-button", variant="primary")
                    
                # Feedback UI - make it optional with no default selection
                feedback = gr.Radio(choices=["πŸ‘ Like", "πŸ‘Ž Dislike"], label="Feedback (Optional)", type="value", visible=False)
                submit_feedback_button = gr.Button("Submit Feedback", visible=False)
                feedback_comments = gr.Textbox(label="Additional Comments (optional)", lines=2, visible=False)
            
            with gr.Column(scale=3):  # System message area
                with gr.Group():
                    gr.HTML("<h2 style='text-align: center; color: white; background-color: orange; padding: 10px;'>System message</h2>")
                    
                    # Example table with system messages
                    example_table = gr.Dataframe(
                        headers=["Message", "System message"],
                        datatype=["str", "str"],
                        value=[
                            ["Hello! How are you?", SYSTEM_MESSAGES["mental_health"]],
                            ["Can you help with a recipe for baking a cake?", SYSTEM_MESSAGES["general"]],
                        ],
                        row_count=2,
                        col_count=2,
                        interactive=True,
                        #height=150
                    )
                    
                    # System message selector
                    system_selector = gr.Radio(
                        choices=["general", "mental_health"],
                        value="general",
                        label="Select system message type",
                        elem_id="system-selector",
                        info="Choose the type of assistant"
                    )
                    
                    # Display the current system message
                    system_message_display = gr.Textbox(
                        value=SYSTEM_MESSAGES["general"],
                        label="Current system instruction:",
                        interactive=False
                    )
                    
                    # Reset button
                    reset_button = gr.Button("Reset Chat", elem_id="reset-button")
                    
                    # Additional inputs section
                    #gr.Markdown("### Additional Inputs")

        # Add custom CSS with HTML component
        gr.HTML("""<style>
            #chat-history { background-color: #f5f5f5; border-radius: 10px; padding: 10px; max-height: 400px; overflow-y: scroll; font-family: 'Arial', sans-serif; }
            #user-input { margin-top: 10px; border-radius: 10px; }
            #submit-button { margin-top: 10px; }
            #system-selector { margin-top: 10px; }
        </style>""")
        
        # Event handlers
        query.submit(chatbot, 
                    inputs=[query, chat_history_state, current_system_message], 
                    outputs=[chat_history_box, chat_history_state, feedback, submit_feedback_button, feedback_comments, latest_response, query, latest_query])
        
        submit_button.click(chatbot, 
                          inputs=[query, chat_history_state, current_system_message], 
                          outputs=[chat_history_box, chat_history_state, feedback, submit_feedback_button, feedback_comments, latest_response, query, latest_query])
        
        # Modified feedback handling
        submit_feedback_button.click(handle_feedback, 
                                   inputs=[latest_query, latest_response, feedback, feedback_comments], 
                                   outputs=[feedback, submit_feedback_button, feedback_comments])
        
        # System message change handler
        system_selector.change(change_system_message,
                             inputs=[system_selector, chat_history_state],
                             outputs=[chat_history_box, chat_history_state, current_system_message])
        
        # Also update system message display when selector changes
        system_selector.change(lambda x: SYSTEM_MESSAGES[x],
                             inputs=[system_selector],
                             outputs=[system_message_display])
        
        # Reset button handler - uses the same function as system_selector.change
        reset_button.click(change_system_message,
                         inputs=[system_selector, chat_history_state],
                         outputs=[chat_history_box, chat_history_state, current_system_message])
        
        # Also reset system message display with reset button
        reset_button.click(lambda x: SYSTEM_MESSAGES[x],
                         inputs=[system_selector],
                         outputs=[system_message_display])
        
        # Initialize with welcome message on load
        demo.load(change_system_message,
                inputs=[system_selector, chat_history_state],
                outputs=[chat_history_box, chat_history_state, current_system_message])
        
        # Initialize system message display on load
        demo.load(lambda x: SYSTEM_MESSAGES[x],
                inputs=[system_selector],
                outputs=[system_message_display])
        
    return demo

# Create and launch the Gradio interface
demo = create_gradio_interface()
demo.launch(share=False, debug=True)