# MentalWell Q&A — Hugging Face Space app
# (scrape artifacts "Spaces: Sleeping" removed; content below is the app source)
| import pandas as pd | |
| import numpy as np | |
| import faiss | |
| import gradio as gr | |
| from sentence_transformers import SentenceTransformer | |
| from gtts import gTTS | |
| import tempfile | |
# Sentence-embedding model used for both the FAQ questions (at startup) and
# each incoming user query; 'all-MiniLM-L6-v2' produces 384-dim vectors.
model = SentenceTransformer('all-MiniLM-L6-v2')
# Load and prepare the FAQ data
def load_static_csv(path="faq.csv"):
    """Load the FAQ knowledge base and normalise its column names.

    Parameters
    ----------
    path : str
        CSV file with exactly two columns (question, answer), in that
        order. Defaults to "faq.csv" for backward compatibility.

    Returns
    -------
    pandas.DataFrame
        The FAQ table with columns renamed to ['question', 'answer'].

    Raises
    ------
    ValueError
        If the CSV does not have exactly two columns — the original code
        would either crash with a cryptic length-mismatch error or
        silently mislabel columns.
    """
    df = pd.read_csv(path)
    if df.shape[1] != 2:
        raise ValueError(
            f"Expected a 2-column CSV (question, answer); got {df.shape[1]} columns"
        )
    # Rename positionally: first column is the question, second the answer.
    df.columns = ['question', 'answer']
    return df
# Load data and build FAISS index at import time: embed every FAQ question
# once and store the vectors in an exact (brute-force) L2 index.
data = load_static_csv()
question_embeddings = model.encode(data['question'].tolist())
# IndexFlatL2 needs the embedding dimensionality; shape[1] is the vector size.
faq_index = faiss.IndexFlatL2(question_embeddings.shape[1])
# Row i of the index corresponds to data.iloc[i] — ask_question relies on this.
faq_index.add(np.array(question_embeddings))
# Function to return answer text and audio file
def ask_question(query, k=1):
    """Answer *query* from the FAQ index and synthesise speech for it.

    Parameters
    ----------
    query : str
        User question; embedded and matched against the indexed FAQ
        questions by L2 distance.
    k : int
        Number of nearest FAQ entries whose answers are concatenated.

    Returns
    -------
    tuple[str, str | None]
        (answer text, path to a temporary MP3 file; None when no audio
        was produced, e.g. for an empty query).
    """
    query = (query or "").strip()
    if not query:
        # Avoid embedding an empty string and calling gTTS with empty
        # text (gTTS raises on empty input).
        return "Please type a question first.", None
    query_embedding = model.encode([query])
    # D = squared L2 distances, I = row indices of the k nearest questions.
    D, I = faq_index.search(np.array(query_embedding), k=k)
    answers = [data.iloc[idx]['answer'] for idx in I[0]]
    results = "\n\n".join(answers).strip()
    # Convert text to speech
    tts = gTTS(text=results, lang='en')
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    # Close our handle before gTTS writes to the path: keeping it open
    # leaks a file descriptor and blocks the re-open on Windows.
    temp_file.close()
    tts.save(temp_file.name)
    return results, temp_file.name
# Styling injected into the Gradio app: pink page background, black text,
# white rounded inputs/buttons, and visibility fixes for the audio widget.
custom_css = """
body, html, .gradio-container {
    background-color: #FFDDEE !important; /* baby pink */
    color: black !important;
    font-family: 'Segoe UI', sans-serif;
}
/* Title */
h1 {
    color: black !important;
    text-align: center;
    font-weight: 700;
    margin-top: 20px;
}
/* Subheading text */
.gr-markdown > p {
    color: black !important;
    text-align: center;
    font-size: 16px;
    font-weight: 500;
}
/* Labels and inputs */
label {
    color: black !important;
    font-weight: bold;
}
textarea, input[type="text"] {
    border-radius: 10px !important;
    padding: 10px;
    border: 1px solid #aaa;
    background-color: white;
    color: black;
}
/* Button */
button {
    background-color: white !important;
    color: black !important;
    font-weight: bold;
    border-radius: 10px;
    padding: 10px 20px;
    border: 2px solid black;
    margin-top: 10px;
    cursor: pointer;
}
/* Answer box */
#answer-box {
    background-color: white;
    color: black !important;
    border-radius: 12px;
    padding: 16px;
    font-size: 16px;
    border: 1px solid #ccc;
    box-shadow: 0 2px 6px rgba(0, 0, 0, 0.1);
    margin-top: 10px;
}
#answer-box p {
    color: black !important;
}
/* AUDIO PLAYER FIXES */
audio {
    width: 100% !important;
}
.gr-audio {
    background-color: white;
    border-radius: 10px;
    padding: 16px;
    border: 1px solid #bbb;
    margin-top: 10px;
}
/* Target speed and audio buttons better */
.gr-audio .speed-button,
.gr-audio .audio-button {
    border: 2px solid black;
    color: black !important;
    background-color: white !important;
    border-radius: 8px;
    padding: 4px 10px;
    font-weight: bold;
    min-width: 48px;
    text-align: center;
    display: inline-block;
}
.gr-audio .speed-button span {
    color: black !important;
    font-weight: bold;
    display: inline-block;
    width: 100%;
    text-align: center;
}
/* Make sure play/pause button is visible */
.gr-audio .play-button svg,
.gr-audio .pause-button svg {
    fill: black !important;
    height: 24px;
    width: 24px;
}
"""
# Gradio UI: a single column of question input + button above a styled
# answer box and an audio player for the spoken answer.
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown(
        "<h1>🧘 MentalWell Q&A</h1>"
        "<p>Ask any mental health related question based on our FAQ knowledge base.</p>"
    )
    with gr.Column():
        query_input = gr.Textbox(label="Ask a Question", placeholder="Type your question here…", lines=1)
        ask_button = gr.Button("Get Answer")
    with gr.Column():
        # elem_id ties this component to the #answer-box rules in custom_css.
        output_text = gr.Markdown(elem_id="answer-box")
        # type="filepath" matches ask_question returning an MP3 path.
        output_audio = gr.Audio(label="Listen", type="filepath")
    # Both the button click and pressing Enter in the textbox trigger the search.
    ask_button.click(fn=ask_question, inputs=query_input, outputs=[output_text, output_audio])
    query_input.submit(fn=ask_question, inputs=query_input, outputs=[output_text, output_audio])
demo.launch()