# streamlit_app.py
import html
import time

import streamlit as st

from src.chatbot import BioethicsChatbot
# Page chrome: wide layout, DNA emoji branding.
# NOTE(review): the scraped source showed mojibake ("π§¬") here; restored to 🧬 — confirm against the repo file.
st.set_page_config(page_title="Bioethics AI Assistant", page_icon="🧬", layout="wide")
st.title("🧬 Bioethics AI Assistant")
st.markdown("*Ask questions about medical ethics, informed consent, research ethics, and more*")
# CSS: pin the input form to the bottom, style the chat area and message
# bubbles, and reserve vertical space so messages never hide behind the bar.
# NOTE(review): the form#input_form rules look vestigial — input is now
# st.chat_input (see below), which renders no <form id="input_form">; confirm
# before deleting.  A duplicated .stTextInput/.stButton rule was removed.
st.markdown(
    """
    <style>
    /* Chat-area wrapper */
    #chat-area {
        width: 100%;
        max-width: 800px;
        margin: 0;
    }
    /* User question */
    #user {
        top: 18px;
        z-index: 10;
        padding: 12px 16px;
        border-radius: 12px;
        background: var(--secondary-background-color);
        margin-bottom: 12px;
        will-change: transform;
    }
    /* Message bubbles */
    .msg {
        padding: 14px 16px;
        border-radius: 12px;
        margin: 10px 0;
        max-width: 65%;
        line-height: 1.45;
    }
    .msg.user {
        background: #101726;
        color: white;
        margin-left: auto;
    }
    .msg.assistant {
        background: #262730;
        color: white;
        margin-right: auto;
    }
    /* Fix the input form (id = input_form) at the bottom, centered */
    form#input_form {
        position: fixed;
        bottom: 18px;
        left: 50%;
        transform: translateX(-50%);
        width: 66.7%;
        z-index: 9999;
        background: transparent;
    }
    /* optional small visual tweak */
    form#input_form .stTextInput, form#input_form .stButton {
        margin: 0 6px;
    }
    /* Reserve space at the bottom equal to input form height */
    .block-container {
        padding-bottom: 120px;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
# --- Session state defaults -------------------------------------------------
# messages:         transcript, list of {"role": "user"|"assistant", "content": str}
# is_streaming:     True while an answer is currently being generated
# show_sticky:      controls the sticky question header
# pending_question: a submission stored so the next render can build placeholders
_STATE_DEFAULTS = {
    "messages": [],
    "is_streaming": False,
    "show_sticky": False,
    "pending_question": None,
}
for _name, _default in _STATE_DEFAULTS.items():
    if _name not in st.session_state:
        st.session_state[_name] = _default
# Cached chatbot resource: built once per server process and shared across
# sessions/reruns via st.cache_resource.
@st.cache_resource
def load_chatbot():
    """Construct the chatbot over the local ``data/`` knowledge base."""
    return BioethicsChatbot("data/")


if 'bot' not in st.session_state:
    # First render in this session: load (or reuse the process-wide cached) bot.
    # NOTE(review): spinner emoji was mojibake in the scraped source; 📚 is a
    # best guess — confirm against the repo file.
    with st.spinner("📚 Loading bioethics knowledge base..."):
        st.session_state.bot = load_chatbot()
# --- Conversation history helper -------------------------------------------
def build_history_pairs(messages, max_pairs=4):
    """Return the most recent completed (user, assistant) exchanges.

    Walks the transcript looking for adjacent user→assistant messages;
    pairs whose assistant reply is missing (None) or empty are skipped.
    At most ``max_pairs`` of the latest pairs are returned.
    """
    completed = []
    idx = 0
    last = len(messages) - 1
    while idx < last:
        current = messages[idx]
        nxt = messages[idx + 1]
        if current['role'] == 'user' and nxt['role'] == 'assistant':
            reply = nxt['content']
            # Exclude in-flight/empty assistant slots from the history.
            if reply is not None and reply != "":
                completed.append((current['content'], reply))
            idx += 2
        else:
            idx += 1
    return completed[-max_pairs:]
# Layout: sample questions | conversation | session stats.
col_left, col1, col2 = st.columns([1, 4, 1])

with col1:
    st.markdown("### 💬 Conversation")
    # Render the transcript in strict chronological order
    # (user -> assistant -> user -> assistant).  An assistant message whose
    # content is "" marks an in-flight response: remember its placeholder and
    # index so the streaming section below can fill it in.
    response_placeholder = None
    inflight_index = None
    for idx, msg in enumerate(st.session_state.messages):
        if msg['role'] == 'user':
            # User bubble.  Escape the text: it is untrusted user input being
            # interpolated into raw HTML (unsafe_allow_html).
            safe_text = html.escape(msg['content'])
            st.markdown(
                f"""
                <div style="display:flex; justify-content:center; width:100%;">
                <div class="msg user">{safe_text}</div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        elif msg['content'] == "":
            # In-flight assistant response: empty bubble the stream handler
            # updates in place.
            response_placeholder = st.empty()
            inflight_index = idx
            response_placeholder.markdown(
                """
                <div style="display:flex; justify-content:center; width:100%;">
                <div id="assistant-inflight" class="msg assistant"></div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        else:
            # Completed assistant bubble.  NOTE(review): model output is
            # deliberately rendered as raw HTML (unescaped) — confirm the
            # model never echoes hostile markup.
            st.markdown(
                f"""
                <div style="display:flex; justify-content:center; width:100%;">
                <div class="msg assistant">{msg['content']}</div>
                </div>
                """,
                unsafe_allow_html=True,
            )

    # If there is an in-flight placeholder and no stream already running,
    # generate the answer now (this runs on the rerun triggered right after
    # a question was queued below).
    if response_placeholder is not None and not st.session_state.get("is_streaming", False):
        st.session_state.is_streaming = True
        # History comes only from completed exchanges — everything strictly
        # before the in-flight assistant slot.
        history_pairs = build_history_pairs(st.session_state.messages[:inflight_index])
        st.session_state.bot.stream_handler.current_text = ""
        st.session_state.bot.stream_handler.placeholder = response_placeholder
        try:
            # The message immediately before the in-flight assistant slot is
            # the question that triggered it.
            user_question = st.session_state.messages[inflight_index - 1]['content']
            answer = st.session_state.bot.ask(user_question, history_pairs=history_pairs)
        except Exception as e:
            answer = f"❌ Error while generating response: {e}"
        try:
            # Write the final text into the same placeholder (also removes
            # the streaming cursor).
            response_placeholder.markdown(
                f"<div style='background:#262730;color:#fff;padding:14px;border-radius:12px;margin:10px 0;max-width:85%;'>"
                f"{answer}</div>",
                unsafe_allow_html=True,
            )
        except Exception:
            # Best effort: even if the placeholder is gone, the answer is
            # still persisted below and will render on the next rerun.
            pass
        # Persist the final answer so subsequent reruns render it normally.
        st.session_state.messages[inflight_index]['content'] = answer
        st.session_state.is_streaming = False

    question = st.chat_input("Your question:")
    if question:
        if st.session_state.get("is_streaming", False):
            st.warning("Please wait for the current response to finish.")
        elif st.session_state.get('query_count', 0) < 30:
            # Queue the question plus an empty assistant slot; the rerun will
            # render the placeholder and start streaming into it.
            st.session_state.messages.append({"role": "user", "content": question})
            st.session_state.messages.append({"role": "assistant", "content": ""})
            st.session_state.query_count = st.session_state.get('query_count', 0) + 1
            st.rerun()
        else:
            st.error("🚫 Demo limit reached for today. This prevents API abuse.")
with col2:
    # Per-session query counter; initialized here (col2's code runs before the
    # sample-question buttons in col_left read it).
    if 'query_count' not in st.session_state:
        st.session_state.query_count = 0
    st.metric("Queries used in your session", f"{st.session_state.query_count}/30")
    st.markdown("---")
    with st.expander("📚 About the Sources"):
        st.markdown("""
        This assistant searches through open-access bioethics papers to find relevant information.

        **Search Process:**
        1. Your question is converted to embeddings
        2. Similar text chunks are found using FAISS vector search
        3. Only chunks with similarity score ≥ 0.65 are used for citations
        4. The language model synthesizes an answer from these sources
        """)
    st.markdown("**📄 License**")
    st.markdown("- [Open Source Papers Used](https://huggingface.co/spaces/ciorant/bioethics-rag/blob/main/LICENSE_INFO.md)")
    st.markdown("**Tech Stack**")
    st.markdown("- Python & Streamlit")
    st.markdown("- OpenAI GPT-4o-mini")
    st.markdown("- FAISS Vector Search")
    st.markdown("- LangChain")
    st.markdown("**📊 Demo Stats**")
    # Only show index stats once the bot (and its vector store) is loaded.
    if 'bot' in st.session_state and hasattr(st.session_state.bot, 'vector_store'):
        doc_count = len(st.session_state.bot.vector_store.documents)
        st.markdown(f"- {doc_count} text chunks indexed")
        st.markdown(f"- Vector dimension: {st.session_state.bot.vector_store.dimension}")
        st.markdown(f"- Queries today: {st.session_state.get('query_count', 0)}")
with col_left:
    st.markdown("### 💡 Sample Questions")
    sample_questions = [
        "What are the key principles of informed consent?",
        "What is the moral side of genomic testing?",
        "How should we approach clinical trial ethics?",
        "Should assistance in dying be legal?",
    ]
    for i, q in enumerate(sample_questions):
        # Index-based widget keys: hash(str) is salted per interpreter run,
        # so f"sample_{hash(q)}" keys would change across server restarts.
        if st.button(q, key=f"sample_{i}", use_container_width=True):
            if not st.session_state.get("is_streaming", False) and st.session_state.get('query_count', 0) < 30:
                # Same queue-and-rerun flow as the chat input in col1.
                st.session_state.messages.append({"role": "user", "content": q})
                st.session_state.messages.append({"role": "assistant", "content": ""})
                # .get guard matches the chat-input path, so this cannot
                # KeyError even if col2's initialization is ever moved.
                st.session_state.query_count = st.session_state.get('query_count', 0) + 1
                st.rerun()