Ani14 commited on
Commit
093bc19
·
verified ·
1 Parent(s): 5db3787

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -28
app.py CHANGED
@@ -190,13 +190,13 @@ def generate_download_button(file, label, mime_type):
190
  # --- Streamlit UI ---
191
  st.set_page_config("Deep Research Assistant", layout="centered")
192
 
193
- # Initialize session state
194
  if "last_report" not in st.session_state:
195
  st.session_state["last_report"] = ""
196
  if "follow_up_input" not in st.session_state:
197
  st.session_state["follow_up_input"] = ""
198
- if "methodology_recommended" not in st.session_state:
199
- st.session_state["methodology_recommended"] = False
200
  if "chat_history" not in st.session_state:
201
  st.session_state["chat_history"] = []
202
 
@@ -296,7 +296,32 @@ APA Citations:
296
  except Exception as e:
297
  st.error(f"❌ Error occurred: {e}")
298
 
299
- # --- Follow-up Q&A (always visible) ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
300
  st.divider()
301
  st.subheader("💬 Follow-up Q&A")
302
 
@@ -305,40 +330,33 @@ followup = st.text_input("Ask a follow-up question:", key="follow_up_input")
305
  if st.button("Ask"):
306
  if followup:
307
  try:
308
- chat = st.session_state.chat_history + [{"role": "user", "content": followup}]
 
 
 
 
 
 
 
 
 
 
 
309
  response = ""
310
  for chunk in call_llm(chat, max_tokens=1500):
311
  response += chunk
 
312
  st.session_state.chat_history.append({"role": "user", "content": followup})
313
  st.session_state.chat_history.append({"role": "assistant", "content": response})
 
314
  st.markdown(response)
 
315
  except Exception as e:
316
  st.error(f"Follow-up error: {e}")
317
 
318
- # --- Chat history display (optional) ---
319
  with st.expander("📜 View Full Chat History", expanded=False):
320
  for msg in st.session_state.chat_history:
321
  role = msg["role"]
322
  prefix = "👀 You" if role == "user" else "🤖 Assistant"
323
- st.markdown(f"**{prefix}:** {msg['content']}")
324
-
325
- # --- Methodology Recommender ---
326
- st.divider()
327
- st.subheader("🧪 Methodology Recommender")
328
-
329
- if st.button("🧠 Suggest Research Methodologies"):
330
- if st.session_state["last_report"]:
331
- try:
332
- method_prompt = [
333
- {"role": "system", "content": "You are a research advisor. Based on the following research report, suggest suitable research methodologies (quantitative, qualitative, ML/AI techniques, etc.). Be concise and give bullet-point suggestions."},
334
- {"role": "user", "content": st.session_state["last_report"]}
335
- ]
336
- method_output = ""
337
- method_box = st.empty()
338
- for chunk in call_llm(method_prompt):
339
- method_output += chunk
340
- method_box.markdown(method_output, unsafe_allow_html=True)
341
- except Exception as e:
342
- st.error(f"❌ Methodology suggestion failed: {e}")
343
- else:
344
- st.warning("⚠️ Generate the research report first.")
 
190
  # --- Streamlit UI ---
191
  st.set_page_config("Deep Research Assistant", layout="centered")
192
 
193
+ # 🧠 Initialize session state
194
  if "last_report" not in st.session_state:
195
  st.session_state["last_report"] = ""
196
  if "follow_up_input" not in st.session_state:
197
  st.session_state["follow_up_input"] = ""
198
+ if "methodology_notes" not in st.session_state:
199
+ st.session_state["methodology_notes"] = ""
200
  if "chat_history" not in st.session_state:
201
  st.session_state["chat_history"] = []
202
 
 
296
  except Exception as e:
297
  st.error(f"❌ Error occurred: {e}")
298
 
299
+ # --- Methodology Recommender ---
300
+ st.divider()
301
+ st.subheader("🧪 Methodology Recommender")
302
+
303
+ if st.button("🧠 Suggest Research Methodologies"):
304
+ if st.session_state["last_report"]:
305
+ try:
306
+ method_prompt = [
307
+ {"role": "system", "content": "You are a research advisor. Based on the following research report, suggest suitable research methodologies (quantitative, qualitative, ML/AI techniques, etc.). Be concise and give bullet-point suggestions."},
308
+ {"role": "user", "content": st.session_state["last_report"]}
309
+ ]
310
+ method_output = ""
311
+ method_box = st.empty()
312
+ for chunk in call_llm(method_prompt):
313
+ method_output += chunk
314
+ method_box.markdown(method_output, unsafe_allow_html=True)
315
+
316
+ # ✅ Store methodology context for follow-up
317
+ st.session_state["methodology_notes"] = method_output
318
+
319
+ except Exception as e:
320
+ st.error(f"❌ Methodology suggestion failed: {e}")
321
+ else:
322
+ st.warning("⚠️ Generate the research report first.")
323
+
324
+ # --- Follow-up Q&A (Contextual to Report + Methodology) ---
325
  st.divider()
326
  st.subheader("💬 Follow-up Q&A")
327
 
 
330
  if st.button("Ask"):
331
  if followup:
332
  try:
333
+ context_intro = (
334
+ "Below is a research report followed by methodology suggestions.\n"
335
+ "Use both to answer the user's follow-up question."
336
+ )
337
+ combined_context = f"{context_intro}\n\n=== Report ===\n{st.session_state['last_report']}\n\n=== Methodology ===\n{st.session_state['methodology_notes']}"
338
+
339
+ chat = st.session_state.chat_history + [
340
+ {"role": "system", "content": "You are an academic research assistant."},
341
+ {"role": "user", "content": combined_context},
342
+ {"role": "user", "content": followup}
343
+ ]
344
+
345
  response = ""
346
  for chunk in call_llm(chat, max_tokens=1500):
347
  response += chunk
348
+
349
  st.session_state.chat_history.append({"role": "user", "content": followup})
350
  st.session_state.chat_history.append({"role": "assistant", "content": response})
351
+
352
  st.markdown(response)
353
+
354
  except Exception as e:
355
  st.error(f"Follow-up error: {e}")
356
 
357
+ # --- Optional: View Chat History ---
358
  with st.expander("📜 View Full Chat History", expanded=False):
359
  for msg in st.session_state.chat_history:
360
  role = msg["role"]
361
  prefix = "👀 You" if role == "user" else "🤖 Assistant"
362
+ st.markdown(f"**{prefix}:** {msg['content']}")