ciorant committed on
Commit
5c15993
·
1 Parent(s): d3b5770

History visible - next attempt

Browse files
Files changed (2) hide show
  1. src/chatbot.py +10 -4
  2. streamlit_app.py +145 -106
src/chatbot.py CHANGED
@@ -19,19 +19,25 @@ from langchain.callbacks.base import BaseCallbackHandler
19
  class StreamHandler(BaseCallbackHandler):
20
  def __init__(self):
21
  self.current_text = ""
22
- self.placeholder = None # we attach this later
23
 
24
  def on_llm_new_token(self, token: str, **kwargs):
25
  self.current_text += token
26
  if self.placeholder is not None:
27
- # Show partial text with a cursor ▌
28
- self.placeholder.markdown(self.current_text + "▌")
 
 
 
 
 
 
 
29
 
30
  def get_text(self):
31
  return self.current_text
32
 
33
 
34
-
35
  class BioethicsChatbot:
36
  def __init__(self, data_dir: str="data/sample_papers"):
37
  self.processor = DocumentProcessor()
 
19
  class StreamHandler(BaseCallbackHandler):
20
  def __init__(self):
21
  self.current_text = ""
22
+ self.placeholder = None # will be set by the UI
23
 
24
  def on_llm_new_token(self, token: str, **kwargs):
25
  self.current_text += token
26
  if self.placeholder is not None:
27
+ try:
28
+ # keep an element id so JS can find & center the in-flight answer
29
+ self.placeholder.markdown(
30
+ f"<div id='assistant-inflight'>{self.current_text}▌</div>",
31
+ unsafe_allow_html=True
32
+ )
33
+ except Exception:
34
+ # placeholder may be invalid during reruns; ignore errors
35
+ pass
36
 
37
  def get_text(self):
38
  return self.current_text
39
 
40
 
 
41
  class BioethicsChatbot:
42
  def __init__(self, data_dir: str="data/sample_papers"):
43
  self.processor = DocumentProcessor()
streamlit_app.py CHANGED
@@ -1,21 +1,58 @@
 
1
  import streamlit as st
2
  from src.chatbot import BioethicsChatbot
3
  import time
4
- from pathlib import Path
5
 
6
- st.set_page_config(
7
- page_title="Bioethics AI Assistant",
8
- page_icon="🧬",
9
- layout="wide"
10
- )
11
 
12
  st.title("🧬 Bioethics AI Assistant")
 
13
 
 
14
  st.markdown(
15
  """
16
  <style>
17
  /* Make room at page bottom so fixed input doesn't overlap messages */
18
- div.block-container { padding-bottom: 180px; }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
  /* Fix the input form (id = input_form) at the bottom, centered */
21
  form#input_form {
@@ -28,7 +65,7 @@ st.markdown(
28
  background: transparent;
29
  }
30
 
31
- /* Optional: small visual tweak so the fixed form looks natural */
32
  form#input_form .stTextInput, form#input_form .stButton {
33
  margin: 0 6px;
34
  }
@@ -37,30 +74,7 @@ st.markdown(
37
  unsafe_allow_html=True,
38
  )
39
 
40
- st.markdown("*Ask questions about medical ethics, informed consent, research ethics, and more*")
41
-
42
- # Sidebar
43
- with st.sidebar:
44
- st.markdown("### About")
45
- st.write("This demo uses Retrieval-Augmented Generation (RAG) with open-access bioethics papers.")
46
- st.markdown("---")
47
- st.markdown("### Sample Questions")
48
- sample_questions = [
49
- "What is paternalism in bioethics?",
50
- "What are the ethical issues with genetic testing?",
51
- "How should AI bias in healthcare be addressed?",
52
- "Is antinatalism rational?",
53
- "What does it mean for women’s autonomy to be respected?"
54
- ]
55
- for q in sample_questions:
56
- if st.button(q, key=f"sample_{q}", use_container_width=True):
57
- st.session_state.current_question = q
58
-
59
- # Rate limiting
60
- if 'query_count' not in st.session_state:
61
- st.session_state.query_count = 0
62
-
63
- # Chat messages stored in session_state (cleared on page refresh)
64
  if 'messages' not in st.session_state:
65
  # messages is a list of {"role": "user"|"assistant", "content": str}
66
  st.session_state.messages = []
@@ -68,7 +82,14 @@ if 'messages' not in st.session_state:
68
  if 'is_streaming' not in st.session_state:
69
  st.session_state.is_streaming = False
70
 
71
- # Initialize chatbot (cached resource)
 
 
 
 
 
 
 
72
  @st.cache_resource
73
  def load_chatbot():
74
  return BioethicsChatbot("data/")
@@ -77,7 +98,6 @@ if 'bot' not in st.session_state:
77
  with st.spinner("🔄 Loading bioethics knowledge base..."):
78
  st.session_state.bot = load_chatbot()
79
 
80
- # Helper to build history pairs for prompt (only include completed assistant responses)
81
  def build_history_pairs(messages, max_pairs=4):
82
  pairs = []
83
  i = 0
@@ -85,7 +105,6 @@ def build_history_pairs(messages, max_pairs=4):
85
  if messages[i]['role'] == 'user' and messages[i+1]['role'] == 'assistant':
86
  user = messages[i]['content']
87
  assistant = messages[i+1]['content']
88
- # Only include completed assistant responses (non-empty)
89
  if assistant is not None and assistant != "":
90
  pairs.append((user, assistant))
91
  i += 2
@@ -93,101 +112,121 @@ def build_history_pairs(messages, max_pairs=4):
93
  i += 1
94
  return pairs[-max_pairs:]
95
 
96
- # Layout: main chat column + sidebar metrics column
97
  col1, col2 = st.columns([4, 1])
98
 
99
  with col1:
100
  st.markdown("### 💬 Conversation")
101
 
102
- # Render history from session_state (chronological top->bottom)
103
- for msg in st.session_state.messages:
104
- if msg['role'] == "user":
105
- with st.chat_message("user"):
106
- st.markdown(msg['content'])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
  else:
108
- with st.chat_message("assistant"):
109
- # if assistant content is empty string -> an in-flight response placeholder
110
- if msg['content'] == "":
111
- # create a placeholder we can stream into
112
- st.session_state.current_placeholder = st.empty()
113
- else:
114
- st.markdown(msg['content'])
115
-
116
- # Input form at the bottom (like ChatGPT)
 
 
 
 
117
  st.markdown(
118
  """
119
  <script>
120
- try { window.scrollTo({ top: document.body.scrollHeight, behavior: 'smooth' }); }
121
- catch(e) { /* ignore */ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  </script>
123
  """,
124
  unsafe_allow_html=True,
125
  )
126
 
 
127
  with st.form("input_form", clear_on_submit=True):
128
  question = st.text_input(
129
  "Your question:",
130
- value=st.session_state.get('current_question', ''),
131
  placeholder="e.g., What are the ethical considerations in clinical trials?",
132
  key="question_input"
133
  )
134
  submit = st.form_submit_button("Send")
135
 
136
- # Clear current_question so sample buttons work only once
137
- if 'current_question' in st.session_state:
138
- del st.session_state.current_question
 
 
 
 
139
 
140
- # Handle form submission
141
- if submit and question and st.session_state.query_count < 30:
142
- if st.session_state.is_streaming:
143
- st.warning("Please wait for the current response to finish.")
144
- else:
145
- # Append user message + assistant placeholder (empty string indicates in-flight)
146
- st.session_state.messages.append({"role": "user", "content": question})
147
- st.session_state.messages.append({"role": "assistant", "content": ""})
148
-
149
- # Immediately render the just-appended messages for smooth UX
150
- with st.chat_message("user"):
151
- st.markdown(question)
152
- with st.chat_message("assistant"):
153
- response_placeholder = st.empty()
154
-
155
- # Build history pairs (exclude the in-flight assistant)
156
- history_pairs = build_history_pairs(st.session_state.messages[:-1])
157
-
158
- # Attach handler and stream
159
- st.session_state.bot.stream_handler.current_text = ""
160
- st.session_state.bot.stream_handler.placeholder = response_placeholder
161
- st.session_state.is_streaming = True
162
-
163
- # Call the bot (streams into placeholder)
164
- try:
165
- answer = st.session_state.bot.ask(question, history_pairs=history_pairs)
166
- except Exception as e:
167
- answer = f"❌ Error while generating response: {e}"
168
-
169
- # Finalize UI and persist the assistant message in session_state
170
- try:
171
- response_placeholder.markdown(answer)
172
- except Exception:
173
- # placeholder might be invalid after a rerun; still save the answer
174
- pass
175
-
176
- # update the last assistant (should be the last message)
177
- if st.session_state.messages and st.session_state.messages[-1]['role'] == 'assistant':
178
- st.session_state.messages[-1]['content'] = answer
179
- else:
180
- # fallback: append assistant answer
181
- st.session_state.messages.append({"role": "assistant", "content": answer})
182
-
183
- st.session_state.query_count += 1
184
- st.session_state.is_streaming = False
185
-
186
- elif submit and st.session_state.query_count >= 30:
187
- st.error("📈 Demo limit reached for today. This prevents API abuse.")
188
- st.info("💡 For unlimited use, clone the repository and use your own API key.")
189
 
190
  with col2:
 
 
191
  st.metric("Queries used in your session", f"{st.session_state.query_count}/30")
192
  st.markdown("---")
193
  with st.expander("📚 About the Sources"):
@@ -223,4 +262,4 @@ with col3f:
223
  st.markdown(f"- Vector dimension: {st.session_state.bot.vector_store.dimension}")
224
  except Exception:
225
  pass
226
- st.markdown(f"- Queries today: {st.session_state.query_count}")
 
1
+ # streamlit_app.py
2
  import streamlit as st
3
  from src.chatbot import BioethicsChatbot
4
  import time
 
5
 
6
+ st.set_page_config(page_title="Bioethics AI Assistant", page_icon="🧬", layout="wide")
 
 
 
 
7
 
8
  st.title("🧬 Bioethics AI Assistant")
9
+ st.markdown("*Ask questions about medical ethics, informed consent, research ethics, and more*")
10
 
11
+ # CSS: fix input bar to bottom, style chat area and bubbles, and reserve vertical space
12
  st.markdown(
13
  """
14
  <style>
15
  /* Make room at page bottom so fixed input doesn't overlap messages */
16
+ div.block-container { padding-bottom: 200px; }
17
+
18
+ /* Chat-area wrapper */
19
+ #chat-area {
20
+ width: 100%;
21
+ max-width: 1100px;
22
+ margin: 0;
23
+ }
24
+
25
+ /* Sticky user question at the top while streaming */
26
+ #sticky-user {
27
+ position: sticky;
28
+ top: 18px;
29
+ z-index: 10;
30
+ padding: 12px 16px;
31
+ border-radius: 12px;
32
+ background: var(--secondary-background-color);
33
+ margin-bottom: 12px;
34
+ }
35
+
36
+ /* Message bubbles */
37
+ .msg {
38
+ padding: 14px 16px;
39
+ border-radius: 12px;
40
+ margin: 10px 0;
41
+ max-width: 85%;
42
+ line-height: 1.45;
43
+ }
44
+ .msg.user {
45
+ background: #2b2b2b;
46
+ color: white;
47
+ align-self: flex-end;
48
+ margin-left: auto;
49
+ }
50
+ .msg.assistant {
51
+ background: #f5f5f5;
52
+ color: black;
53
+ align-self: flex-start;
54
+ margin-right: auto;
55
+ }
56
 
57
  /* Fix the input form (id = input_form) at the bottom, centered */
58
  form#input_form {
 
65
  background: transparent;
66
  }
67
 
68
+ /* optional small visual tweak */
69
  form#input_form .stTextInput, form#input_form .stButton {
70
  margin: 0 6px;
71
  }
 
74
  unsafe_allow_html=True,
75
  )
76
 
77
+ # session state
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  if 'messages' not in st.session_state:
79
  # messages is a list of {"role": "user"|"assistant", "content": str}
80
  st.session_state.messages = []
 
82
  if 'is_streaming' not in st.session_state:
83
  st.session_state.is_streaming = False
84
 
85
+ if 'show_sticky' not in st.session_state:
86
+ st.session_state.show_sticky = False
87
+
88
+ # if a submission was made, store it as pending so next render can create placeholders nicely
89
+ if 'pending_question' not in st.session_state:
90
+ st.session_state.pending_question = None
91
+
92
+ # cached chatbot resource
93
  @st.cache_resource
94
  def load_chatbot():
95
  return BioethicsChatbot("data/")
 
98
  with st.spinner("🔄 Loading bioethics knowledge base..."):
99
  st.session_state.bot = load_chatbot()
100
 
 
101
  def build_history_pairs(messages, max_pairs=4):
102
  pairs = []
103
  i = 0
 
105
  if messages[i]['role'] == 'user' and messages[i+1]['role'] == 'assistant':
106
  user = messages[i]['content']
107
  assistant = messages[i+1]['content']
 
108
  if assistant is not None and assistant != "":
109
  pairs.append((user, assistant))
110
  i += 2
 
112
  i += 1
113
  return pairs[-max_pairs:]
114
 
115
+ # Layout columns
116
  col1, col2 = st.columns([4, 1])
117
 
118
  with col1:
119
  st.markdown("### 💬 Conversation")
120
 
121
+ # If pending_question exists (set on submit), append the user+empty assistant before rendering
122
+ if st.session_state.pending_question:
123
+ q = st.session_state.pending_question
124
+ st.session_state.pending_question = None
125
+ st.session_state.messages.append({"role": "user", "content": q})
126
+ st.session_state.messages.append({"role": "assistant", "content": ""})
127
+ st.session_state.show_sticky = True
128
+
129
+ # Show sticky user (top) while streaming / during in-flight
130
+ if st.session_state.show_sticky and len(st.session_state.messages) >= 2:
131
+ sticky_user = st.session_state.messages[-2]['content']
132
+ st.markdown(f"<div id='sticky-user' class='msg user'>{sticky_user}</div>", unsafe_allow_html=True)
133
+
134
+ # Chat area: render earlier messages (excluding the in-flight pair when show_sticky True)
135
+ st.markdown("<div id='chat-area'>", unsafe_allow_html=True)
136
+ render_up_to = len(st.session_state.messages)
137
+ if st.session_state.show_sticky and len(st.session_state.messages) >= 2:
138
+ render_up_to = len(st.session_state.messages) - 2
139
+
140
+ for i in range(render_up_to):
141
+ msg = st.session_state.messages[i]
142
+ if msg['role'] == 'user':
143
+ safe_html = f"<div class='msg user'>{msg['content']}</div>"
144
  else:
145
+ safe_html = f"<div class='msg assistant'>{msg['content']}</div>"
146
+ st.markdown(safe_html, unsafe_allow_html=True)
147
+
148
+ # If there is an in-flight assistant (last message is assistant and empty), create a placeholder inside chat-area
149
+ response_placeholder = None
150
+ if st.session_state.show_sticky and len(st.session_state.messages) >= 2 and st.session_state.messages[-1]['role'] == 'assistant':
151
+ response_placeholder = st.empty()
152
+ # initialize with the wrapper div (StreamHandler will update this same element)
153
+ response_placeholder.markdown("<div id='assistant-inflight' class='msg assistant'></div>", unsafe_allow_html=True)
154
+
155
+ st.markdown("</div>", unsafe_allow_html=True)
156
+
157
+ # JS: center the in-flight assistant message; if none, ensure sticky user is visible
158
  st.markdown(
159
  """
160
  <script>
161
+ (function(){
162
+ try {
163
+ const assist = document.getElementById('assistant-inflight');
164
+ const sticky = document.getElementById('sticky-user');
165
+ if (assist) {
166
+ // center assistant in viewport (approx), leaving input bar visible at bottom
167
+ const rect = assist.getBoundingClientRect();
168
+ const targetY = window.scrollY + rect.top - (window.innerHeight / 2) + (rect.height / 2);
169
+ window.scrollTo({ top: targetY, behavior: 'smooth' });
170
+ } else if (sticky) {
171
+ // make sure sticky user is visible at top
172
+ const rect = sticky.getBoundingClientRect();
173
+ const targetY = window.scrollY + rect.top - 20;
174
+ window.scrollTo({ top: targetY, behavior: 'smooth' });
175
+ }
176
+ } catch(e) {}
177
+ })();
178
  </script>
179
  """,
180
  unsafe_allow_html=True,
181
  )
182
 
183
+ # Input form (fixed by CSS at bottom)
184
  with st.form("input_form", clear_on_submit=True):
185
  question = st.text_input(
186
  "Your question:",
187
+ value="",
188
  placeholder="e.g., What are the ethical considerations in clinical trials?",
189
  key="question_input"
190
  )
191
  submit = st.form_submit_button("Send")
192
 
193
  + # handle submit: store the question as pending, then call st.experimental_rerun()
194
  + # so the next render appends it and creates the placeholder before streaming starts
195
+ if submit and question and not st.session_state.is_streaming:
196
+ # put question into pending so top-of-render will append it and create placeholder in the same run
197
+ st.session_state.pending_question = question
198
+ # do a soft rerun by calling st.experimental_rerun() to ensure placeholder is created before streaming
199
+ st.experimental_rerun()
200
 
201
+ # If there's an in-flight assistant placeholder and we haven't started streaming yet, start it
202
+ if response_placeholder is not None and not st.session_state.is_streaming:
203
+ # prepare history (exclude the in-flight assistant itself)
204
+ history_pairs = build_history_pairs(st.session_state.messages[:-1])
205
+ # start streaming
206
+ st.session_state.is_streaming = True
207
+ st.session_state.bot.stream_handler.current_text = ""
208
+ st.session_state.bot.stream_handler.placeholder = response_placeholder
209
+ try:
210
+ # ask; this will stream tokens via the handler into response_placeholder
211
+ question_for_call = st.session_state.messages[-2]['content']
212
+ answer = st.session_state.bot.ask(question_for_call, history_pairs=history_pairs)
213
+ except Exception as e:
214
+ answer = f"❌ Error while generating response: {e}"
215
+
216
+ # finalize: ensure placeholder displays final text, persist into messages, and clear sticky
217
+ try:
218
+ response_placeholder.markdown(f"<div id='assistant-inflight' class='msg assistant'>{answer}</div>", unsafe_allow_html=True)
219
+ except Exception:
220
+ pass
221
+
222
+ # store final assistant text
223
+ st.session_state.messages[-1]['content'] = answer
224
+ st.session_state.show_sticky = False
225
+ st.session_state.is_streaming = False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
 
227
  with col2:
228
+ if 'query_count' not in st.session_state:
229
+ st.session_state.query_count = 0
230
  st.metric("Queries used in your session", f"{st.session_state.query_count}/30")
231
  st.markdown("---")
232
  with st.expander("📚 About the Sources"):
 
262
  st.markdown(f"- Vector dimension: {st.session_state.bot.vector_store.dimension}")
263
  except Exception:
264
  pass
265
+ st.markdown(f"- Queries today: {st.session_state.get('query_count', 0)}")