# Hugging Face Spaces page header captured with this file (Space status: Sleeping).
| """ | |
| HSAN1 Research Assistant - Final High Contrast Input Fix | |
| """ | |
| import os | |
| import gradio as gr | |
| from dotenv import load_dotenv | |
| from langchain_google_genai import ChatGoogleGenerativeAI | |
| from langchain_huggingface import HuggingFaceEmbeddings | |
| from langchain_community.vectorstores import FAISS | |
| from langchain_core.messages import HumanMessage, SystemMessage | |
| # Load environment variables | |
| load_dotenv() | |
# --- 1. ENHANCED CSS FOR ACCESSIBILITY AND UI DESIGN ---
# Injected into the Gradio page via gr.Blocks(css=...). Forces high-contrast
# (solid black, bold) text and input placeholders for low-vision users, and
# hides Gradio's footer / "built with" branding.
css = """
footer {display:none !important; visibility:hidden !important;}
.show-api {display:none !important;}
.built-with {display:none !important;}
/* 1. Background Outline */
.gradio-container {
border: 3px solid #0056b3 !important;
border-radius: 15px;
padding: 20px !important;
background-color: #ffffff !important;
}
/* 2. Description Text */
.custom-description {
font-size: 1.4rem !important;
color: #000000 !important;
font-weight: 700 !important;
line-height: 1.4;
margin-bottom: 20px;
}
/* 3. Assistant Responses */
.message-wrap .message, .prose p, .prose span, label {
color: #000000 !important;
font-weight: 700 !important;
font-size: 1.1rem !important;
}
/* 4. FORCED HIGH CONTRAST PLACEHOLDER (The "Enter question here" text) */
/* We use multiple selectors to ensure it stays solid black and bold */
input::placeholder {
color: #000000 !important;
opacity: 1 !important;
font-weight: 900 !important;
}
::-webkit-input-placeholder {
color: #000000 !important;
opacity: 1 !important;
}
::-moz-placeholder {
color: #000000 !important;
opacity: 1 !important;
}
:-ms-input-placeholder {
color: #000000 !important;
opacity: 1 !important;
}
/* 5. Button Styling */
.gr-button-secondary {
border: 2px solid #000000 !important;
color: #000000 !important;
font-weight: bold !important;
}
"""
# Configuration
INDEX_PATH = "./faiss_index"  # directory containing the prebuilt FAISS index
SYSTEM_PROMPT = """You are a compassionate medical research assistant helping patients and families understand HSAN1.
You have access to a database of 246 research documents including papers, newsletters, and family histories.
Instructions:
- Answer questions based ONLY on the provided context.
- If the answer is not in the context, say "I don't see that information in the research documents I have."
- Use clear, empathetic language and explain medical terms.
- Be accurate but hopeful in tone.
- Keep responses concise but informative."""

# Check for API key — fail fast at import time rather than on the first query.
api_key = os.environ.get("GOOGLE_API_KEY")
if not api_key:
    raise ValueError("GOOGLE_API_KEY environment variable not set")

# Load components (heavyweight, one-time side effects at import).
print("Loading embeddings model...")
# NOTE(review): the query-time embedding model must match the one used to
# build the index at INDEX_PATH — confirm the index was built with MiniLM.
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

print("Loading FAISS index...")
# allow_dangerous_deserialization unpickles the stored index; acceptable only
# because this is a local, trusted artifact built by this project — never
# enable it for an index obtained from an untrusted source.
vectorstore = FAISS.load_local(INDEX_PATH, embeddings, allow_dangerous_deserialization=True)
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})  # top-5 chunks per query

print("Initializing Gemini...")
# NOTE(review): confirm "gemini-3-flash-preview" is a valid, available model
# id for this google-genai integration version.
llm = ChatGoogleGenerativeAI(
    model="gemini-3-flash-preview",
    temperature=0.3,
    streaming=True
)
def _chunk_to_text(content):
    """Normalize a streamed chunk's content to a plain string.

    Gemini chunks may arrive either as a plain string or as a list of
    parts, where each part is a string or a dict carrying a 'text' key.
    """
    if isinstance(content, list):
        return "".join(
            part.get('text', '') if isinstance(part, dict) else part
            for part in content
        )
    return content


def respond(message, history):
    """Stream a retrieval-augmented answer to `message`.

    Retrieves the top-k document chunks for the question, sends them as
    context to Gemini along with the system prompt, and yields the growing
    response text so Gradio can render it incrementally. The final yield
    appends up to three source filenames.

    Args:
        message: The user's question.
        history: Prior chat turns supplied by gr.ChatInterface (unused —
            each question is answered independently of the conversation).

    Yields:
        str: The cumulative response text; if any sources were retrieved,
        a last yield repeats the response with a trailing sources line.
    """
    docs = retriever.invoke(message)
    context = "\n\n---\n\n".join(doc.page_content for doc in docs)
    # Deduplicate source filenames while PRESERVING retrieval (relevance)
    # order. The original list(set(...)) made the order nondeterministic,
    # so the [:3] truncation below displayed an arbitrary three sources.
    sources = list(dict.fromkeys(
        os.path.basename(doc.metadata.get("source", "Unknown")) for doc in docs
    ))
    augmented_prompt = f"Context:\n{context}\n---\nUser question: {message}"
    messages = [SystemMessage(content=SYSTEM_PROMPT), HumanMessage(content=augmented_prompt)]
    response = ""
    for chunk in llm.stream(messages):
        if chunk.content:
            response += _chunk_to_text(chunk.content)
            yield response
    if sources:
        yield response + f"\n\n---\n*Sources: {', '.join(sources[:3])}*"
# --- 2. INTERFACE SETUP ---
# Build the page: title, instruction line, and the chat UI wired to respond().
with gr.Blocks(css=css, title="HSAN1 Research Assistant") as demo:
    gr.Markdown("# 🧬 HSAN1 Research Assistant")
    # Instruction line (styled by .custom-description in the CSS above).
    gr.HTML("<p class='custom-description'>Scroll down to enter your question. Responses are based solely on contents of this website.</p>")
    gr.ChatInterface(
        respond,
        type="messages",
        theme="base",
        # Custom label for the Assistant response box
        chatbot=gr.Chatbot(label="The Assistant's response will appear in this box. This may take a moment or two.", show_label=True),
        # Custom placeholder for the input box
        textbox=gr.Textbox(placeholder="Enter question here", container=False, scale=7),
    )

# NOTE(review): footer_links is not a documented gr.Blocks attribute —
# presumably a belt-and-braces attempt to hide footer links alongside the
# CSS; confirm it has any effect on this Gradio version.
demo.footer_links = []

if __name__ == "__main__":
    demo.launch(show_api=False)