# NOTE: removed HuggingFace Spaces page header ("Spaces / Sleeping") captured by the scrape — not part of the source file.
# streamlit_app.py
import streamlit as st
from src.chatbot import BioethicsChatbot
import time  # NOTE(review): `time` appears unused in this view — confirm before removing.

# Page chrome: wide layout gives the three-column UI below room to breathe.
st.set_page_config(page_title="Bioethics AI Assistant", page_icon="π§¬", layout="wide")
st.title("𧬠Bioethics AI Assistant")
st.markdown("*Ask questions about medical ethics, informed consent, research ethics, and more*")
# CSS: fix input bar to bottom, style chat area and bubbles, and reserve vertical space.
# Fix applied: the `form#input_form .stTextInput, .stButton` rule appeared twice
# verbatim; the duplicate has been removed.
st.markdown(
    """
    <style>
    /* Chat-area wrapper */
    #chat-area {
        width: 100%;
        max-width: 800px;
        margin: 0;
    }
    /* User question */
    /* NOTE(review): this rule sets top/z-index/will-change but no `position`
       property, so those declarations are inert — was `position: sticky`
       intended? Confirm. */
    #user {
        top: 18px;
        z-index: 10;
        padding: 12px 16px;
        border-radius: 12px;
        background: var(--secondary-background-color);
        margin-bottom: 12px;
        will-change: transform;
    }
    /* Message bubbles */
    .msg {
        padding: 14px 16px;
        border-radius: 12px;
        margin: 10px 0;
        max-width: 65%;
        line-height: 1.45;
    }
    .msg.user {
        background: #101726;
        color: white;
        margin-left: auto;
    }
    .msg.assistant {
        background: #262730;
        color: white;
        margin-right: auto;
    }
    /* Fix the input form (id = input_form) at the bottom, centered */
    /* NOTE(review): the app uses st.chat_input, which renders no
       form#input_form element — these rules look vestigial; confirm before
       removing. */
    form#input_form {
        position: fixed;
        bottom: 18px;
        left: 50%;
        transform: translateX(-50%);
        width: 66.7%;
        z-index: 9999;
        background: transparent;
    }
    /* optional small visual tweak */
    form#input_form .stTextInput, form#input_form .stButton {
        margin: 0 6px;
    }
    /* Reserve space at the bottom equal to input form height */
    .block-container {
        padding-bottom: 120px;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
# --- Session-state bootstrap -------------------------------------------------
# Seed every key the app reads later; each key is set only on the first run of
# a browser session (Streamlit reruns this script top-to-bottom per event).
_SESSION_DEFAULTS = {
    # chat transcript: list of {"role": "user"|"assistant", "content": str}
    'messages': [],
    # True while an answer is being streamed into its placeholder
    'is_streaming': False,
    'show_sticky': False,
    # a submitted question stored here lets the next render create placeholders
    'pending_question': None,
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
# Cached chatbot resource. Fix: the original comment promised caching but the
# function was not decorated — @st.cache_resource makes Streamlit reuse one
# BioethicsChatbot instance across reruns and sessions instead of
# re-constructing it each time. The session_state guard below additionally
# skips even the cache lookup on ordinary reruns.
@st.cache_resource
def load_chatbot():
    """Construct the chatbot over the local "data/" knowledge base."""
    return BioethicsChatbot("data/")

if 'bot' not in st.session_state:
    with st.spinner("π Loading bioethics knowledge base..."):
        st.session_state.bot = load_chatbot()
def build_history_pairs(messages, max_pairs=4):
    """Collect up to the last *max_pairs* completed (user, assistant) exchanges.

    Walks the chronological message list, pairing each user message with the
    assistant reply that immediately follows it. Pairs whose assistant content
    is ``None`` or ``""`` (i.e. still streaming) are skipped; stray messages
    that don't form a user->assistant pair are stepped over one at a time.
    """
    completed = []
    idx = 0
    while idx + 1 < len(messages):
        current, following = messages[idx], messages[idx + 1]
        if current['role'] == 'user' and following['role'] == 'assistant':
            reply = following['content']
            # only finished replies count toward conversational history
            if reply is not None and reply != "":
                completed.append((current['content'], reply))
            idx += 2
        else:
            idx += 1
    return completed[-max_pairs:]
# Layout columns: sample questions | conversation | stats/info.
# NOTE(review): all indentation in this section was reconstructed from the
# flattened scrape — confirm nesting against the original file.
col_left, col1, col2 = st.columns([1, 4, 1])
with col1:
    st.markdown("### π¬ Conversation")
    # Input form (kept logically first so submit handling happens before rendering),
    # CSS will pin it visually to the bottom (form id = input_form).
    # Centered chat container for better visuals.
    # Render messages in strict chronological order (user -> assistant -> ...)
    # and track the first in-flight assistant placeholder (content == "").
    response_placeholder = None  # st.empty() slot for the in-flight reply
    inflight_index = None        # index of that reply in session_state.messages
    for idx, msg in enumerate(st.session_state.messages):
        if msg['role'] == 'user':
            # user bubble (dark)
            st.markdown(
                f"""
                <div style="display:flex; justify-content:center; width:100%;">
                <div class="msg user">{msg['content']}</div>
                </div>
                """,
                unsafe_allow_html=True
            )
        else:
            # assistant bubble
            if msg['content'] == "":
                # Empty content marks a reply not generated yet: reserve an
                # st.empty() so streaming can rewrite this spot in place.
                response_placeholder = st.empty()
                inflight_index = idx
                response_placeholder.markdown(
                    """
                    <div style="display:flex; justify-content:center; width:100%;">
                    <div id="assistant-inflight" class="msg assistant"></div>
                    </div>
                    """,
                    unsafe_allow_html=True
                )
            else:
                st.markdown(
                    f"""
                    <div style="display:flex; justify-content:center; width:100%;">
                    <div class="msg assistant">{msg['content']}</div>
                    </div>
                    """,
                    unsafe_allow_html=True
                )
    # If there's an inflight placeholder and we're not already streaming, start streaming now.
    if response_placeholder is not None and not st.session_state.get("is_streaming", False):
        st.session_state.is_streaming = True
        # Build history pairs from only completed assistant responses
        # (slicing to inflight_index excludes the in-flight assistant).
        history_pairs = build_history_pairs(st.session_state.messages[:inflight_index])
        # Point the bot's stream handler at our placeholder so its callback
        # renders tokens incrementally into the reserved bubble.
        st.session_state.bot.stream_handler.current_text = ""
        st.session_state.bot.stream_handler.placeholder = response_placeholder
        # Perform the streaming call (updates response_placeholder via the callback).
        try:
            # The user question immediately precedes the in-flight assistant slot.
            user_question = st.session_state.messages[inflight_index - 1]['content']
            answer = st.session_state.bot.ask(user_question, history_pairs=history_pairs)
        except Exception as e:
            answer = f"β Error while generating response: {e}"
        # Finalize UI and persist final assistant text.
        try:
            # put final content into the same placeholder (removes streaming cursor)
            response_placeholder.markdown(
                f"<div style='background:#262730;color:#fff;padding:14px;border-radius:12px;margin:10px 0;max-width:85%;'>"
                f"{answer}</div>",
                unsafe_allow_html=True
            )
        except Exception:
            # best-effort UI update only; the answer is still persisted below
            pass
        # Save final answer back into session_state messages so the next rerun
        # renders it as a normal (non-placeholder) assistant bubble.
        st.session_state.messages[inflight_index]['content'] = answer
        st.session_state.is_streaming = False
    # Bottom-pinned input: submitting appends a user message plus an empty
    # assistant slot, then reruns so the render pass above starts streaming.
    question = st.chat_input("Your question:")
    if question:
        if st.session_state.get("is_streaming", False):
            st.warning("Please wait for the current response to finish.")
        elif st.session_state.get('query_count', 0) < 30:
            st.session_state.messages.append({"role": "user", "content": question})
            st.session_state.messages.append({"role": "assistant", "content": ""})
            st.session_state.query_count = st.session_state.get('query_count', 0) + 1
            st.rerun()
        else:
            st.error("π Demo limit reached for today. This prevents API abuse.")
with col2:
    # query_count is also read via .get() by the chat handler above;
    # its canonical initialization lives here.
    if 'query_count' not in st.session_state:
        st.session_state.query_count = 0
    st.metric("Queries used in your session", f"{st.session_state.query_count}/30")
    st.markdown("---")
    with st.expander("π About the Sources"):
        # Content kept flush-left: 4+ leading spaces would render as a
        # markdown code block.
        st.markdown("""
This assistant searches through open-access bioethics papers to find relevant information.
**Search Process:**
1. Your question is converted to embeddings
2. Similar text chunks are found using FAISS vector search
3. Only chunks with similarity score β₯ 0.65 are used for citations
4. The language model synthesizes an answer from these sources
""")
    st.markdown("**π License**")
    st.markdown("- [Open Source Papers Used](https://huggingface.co/spaces/ciorant/bioethics-rag/blob/main/LICENSE_INFO.md)")
    st.markdown("**Tech Stack**")
    st.markdown("- Python & Streamlit")
    st.markdown("- OpenAI GPT-4o-mini")
    st.markdown("- FAISS Vector Search")
    st.markdown("- LangChain")
    st.markdown("**π Demo Stats**")
    # Guard: the bot may not be loaded yet, and vector_store is a
    # project-defined attribute — only render stats when both exist.
    if 'bot' in st.session_state and hasattr(st.session_state.bot, 'vector_store'):
        doc_count = len(st.session_state.bot.vector_store.documents)
        st.markdown(f"- {doc_count} text chunks indexed")
        st.markdown(f"- Vector dimension: {st.session_state.bot.vector_store.dimension}")
        st.markdown(f"- Queries today: {st.session_state.get('query_count', 0)}")
with col_left:
    st.markdown("### π‘ Sample Questions")
    sample_questions = [
        "What are the key principles of informed consent?",
        "What is the moral side of genomic testing?",
        "How should we approach clinical trial ethics?",
        "Should assistance in dying be legal?"
    ]
    for i, q in enumerate(sample_questions):
        # Fix: str hash() is salted per process, so the previous
        # f"sample_{hash(q)}" keys were non-deterministic across restarts;
        # the list index is stable and unique.
        if st.button(q, key=f"sample_{i}", use_container_width=True):
            # Silently ignore clicks while streaming or past the demo limit
            # (the chat-input path shows the corresponding warnings).
            if not st.session_state.get("is_streaming", False) and st.session_state.get('query_count', 0) < 30:
                st.session_state.messages.append({"role": "user", "content": q})
                # empty assistant slot signals the chat column to start streaming
                st.session_state.messages.append({"role": "assistant", "content": ""})
                # Fix: .get()-based increment mirrors the chat-input path and
                # cannot raise if col2's initialization hasn't run yet.
                st.session_state.query_count = st.session_state.get('query_count', 0) + 1
                st.rerun()