Ani14 committed on
Commit
08418b2
·
verified ·
1 Parent(s): 04b8d03

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -52
app.py CHANGED
@@ -13,17 +13,31 @@ from fpdf import FPDF
13
  from duckduckgo_search import DDGS
14
  from io import BytesIO
15
 
16
- # --- Load API Keys ---
17
  load_dotenv()
18
  OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
19
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
20
  tavily = TavilyClient(api_key=TAVILY_API_KEY)
21
 
22
- # --- Utility Functions ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  def save_session_data():
24
  data = {
25
- "memory_bank": st.session_state.get("memory_bank", []),
26
- "chat_threads": st.session_state.get("chat_threads", {})
27
  }
28
  with open("session_memory.json", "w", encoding="utf-8") as f:
29
  json.dump(data, f, ensure_ascii=False, indent=4)
@@ -35,12 +49,14 @@ def load_session_data():
35
  st.session_state.memory_bank = data.get("memory_bank", [])
36
  st.session_state.chat_threads = data.get("chat_threads", {})
37
 
 
 
 
38
  def call_llm(messages, model="deepseek/deepseek-chat-v3-0324:free", max_tokens=4000, temperature=0.7):
39
  url = "https://openrouter.ai/api/v1/chat/completions"
40
  headers = {
41
  "Authorization": f"Bearer {OPENROUTER_API_KEY}",
42
- "Content-Type": "application/json",
43
- "X-Title": "Deep Research Assistant"
44
  }
45
  data = {
46
  "model": model,
@@ -64,8 +80,8 @@ def call_llm(messages, model="deepseek/deepseek-chat-v3-0324:free", max_tokens=4
64
  yield token
65
  except json.JSONDecodeError:
66
  pass
67
-
68
- def get_image_urls(query, max_images=5):
69
  with DDGS() as ddgs:
70
  return [img["image"] for img in ddgs.images(query, max_results=max_images)]
71
 
@@ -175,21 +191,11 @@ def download_threads_as_pdf(chat_threads):
175
  pdf_output.seek(0)
176
  return pdf_output
177
 
178
- # --- Streamlit UI Setup ---
179
  st.set_page_config(page_title="🧠 Deep Research Assistant 4.0", layout="centered")
180
 
181
- if "memory_bank" not in st.session_state:
182
- st.session_state.memory_bank = []
183
- if "chat_threads" not in st.session_state:
184
- st.session_state.chat_threads = {}
185
- if "current_thread_id" not in st.session_state:
186
- st.session_state.current_thread_id = None
187
-
188
- load_session_data()
189
-
190
  # --- Sidebar ---
191
  with st.sidebar:
192
- st.image("https://raw.githubusercontent.com/mk-gurucharan/streamlit-deep-research/main/deep_research_logo.png", use_container_width=True)
193
  st.markdown("## πŸ” Start New Research")
194
  topic = st.text_input("🧠 Topic")
195
  report_type = st.selectbox("πŸ“„ Report Type", ["Summary", "Detailed Report", "Thorough Academic Research"])
@@ -197,17 +203,12 @@ with st.sidebar:
197
  source_type = st.selectbox("πŸ“š Sources", ["Web Only", "Academic Only", "Hybrid"])
198
  custom_domains = st.text_input("🌐 Optional Domains", placeholder="forbes.com, mit.edu")
199
  research_button = st.button("πŸš€ Run Deep Research", use_container_width=True)
200
- st.markdown("---")
201
- st.markdown("Made with ❀️ by Cutie AI ✨")
202
 
203
- # --- Main Title ---
204
  st.title("πŸŒ™ Deep Research Assistant 4.0")
205
  st.markdown("Where serious research meets serious style. πŸ§ πŸ’–")
206
  st.divider()
207
 
208
- # Continue from here for Main Research Section...
209
-
210
- # --- Show Web Images from Topic ---
211
  if topic and research_button:
212
  st.subheader("πŸ–Ό Related Images from the Web")
213
  try:
@@ -294,13 +295,12 @@ Citations:
294
 
295
  except Exception as e:
296
  st.error(f"❌ Error: {e}")
297
-
298
  # --- Chat Threads Section ---
299
  st.divider()
300
  st.subheader("πŸ“‚ Your Research Threads")
301
 
302
- user_avatar = "https://cdn-icons-png.flaticon.com/512/9131/9131529.png"
303
- assistant_avatar = "https://cdn-icons-png.flaticon.com/512/4712/4712107.png"
304
 
305
  for tid, chats in st.session_state.chat_threads.items():
306
  with st.container():
@@ -339,18 +339,6 @@ if st.session_state.chat_threads:
339
  pdf_file = download_threads_as_pdf(st.session_state.chat_threads)
340
  st.download_button("πŸ“₯ Download All Threads as PDF", data=pdf_file, file_name="Research_Threads.pdf", mime="application/pdf", use_container_width=True)
341
 
342
-
343
-
344
- # 🧠 Initialize session state
345
- if "last_report" not in st.session_state:
346
- st.session_state["last_report"] = ""
347
- if "follow_up_input" not in st.session_state:
348
- st.session_state["follow_up_input"] = ""
349
- if "methodology_notes" not in st.session_state:
350
- st.session_state["methodology_notes"] = ""
351
- if "chat_history" not in st.session_state:
352
- st.session_state["chat_history"] = []
353
-
354
  # --- Methodology Recommender ---
355
  st.divider()
356
  st.subheader("πŸ§ͺ Methodology Recommender")
@@ -368,7 +356,6 @@ if st.button("🧠 Suggest Research Methodologies"):
368
  method_output += chunk
369
  method_box.markdown(method_output, unsafe_allow_html=True)
370
 
371
- # βœ… Store methodology context for follow-up
372
  st.session_state["methodology_notes"] = method_output
373
 
374
  except Exception as e:
@@ -433,15 +420,8 @@ if uploaded_file and st.button("🧠 Analyze and Suggest Improvements"):
433
  else:
434
  feedback_prompt = [
435
  {"role": "system", "content": "You are an expert academic advisor."},
436
- {"role": "user", "content": f"""I have written the following research paper. Please analyze it and provide detailed suggestions on:
437
- - Areas where the paper is weak or unclear
438
- - How to improve the novelty or originality
439
- - Structural improvements or better ways to present arguments
440
-
441
- Be honest and constructive. Here's the full text:
442
-
443
- \"\"\"{paper_text}\"\"\""""}
444
- ]
445
 
446
  with st.status("πŸ”Ž Analyzing your paper..."):
447
  improvement_output = ""
@@ -453,8 +433,8 @@ if uploaded_file and st.button("🧠 Analyze and Suggest Improvements"):
453
  except Exception as e:
454
  st.error(f"❌ Error while analyzing paper: {e}")
455
 
456
-
457
- # --- Optional: View Chat History ---
458
  with st.expander("πŸ“œ View Full Chat History", expanded=False):
459
  for msg in st.session_state.chat_history:
460
  role = msg["role"]
 
13
  from duckduckgo_search import DDGS
14
  from io import BytesIO
15
 
16
+ # Load environment variables
17
  load_dotenv()
18
  OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
19
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
20
  tavily = TavilyClient(api_key=TAVILY_API_KEY)
21
 
22
+ # Initialize session state
23
+ if "memory_bank" not in st.session_state:
24
+ st.session_state.memory_bank = []
25
+ if "chat_threads" not in st.session_state:
26
+ st.session_state.chat_threads = {}
27
+ if "current_thread_id" not in st.session_state:
28
+ st.session_state.current_thread_id = None
29
+ if "last_report" not in st.session_state:
30
+ st.session_state.last_report = ""
31
+ if "methodology_notes" not in st.session_state:
32
+ st.session_state.methodology_notes = ""
33
+ if "chat_history" not in st.session_state:
34
+ st.session_state.chat_history = []
35
+
36
+ # Session data functions
37
def save_session_data():
    """Persist the memory bank and chat threads to ``session_memory.json``.

    Uses ``.get`` with empty defaults so a call made before session-state
    initialization (or after a state reset) writes an empty snapshot
    instead of raising AttributeError/KeyError.
    """
    data = {
        "memory_bank": st.session_state.get("memory_bank", []),
        "chat_threads": st.session_state.get("chat_threads", {}),
    }
    # ensure_ascii=False keeps emoji/non-ASCII content readable in the file.
    with open("session_memory.json", "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=4)
 
49
  st.session_state.memory_bank = data.get("memory_bank", [])
50
  st.session_state.chat_threads = data.get("chat_threads", {})
51
 
52
+ load_session_data()
53
+
54
+ # LLM call
55
  def call_llm(messages, model="deepseek/deepseek-chat-v3-0324:free", max_tokens=4000, temperature=0.7):
56
  url = "https://openrouter.ai/api/v1/chat/completions"
57
  headers = {
58
  "Authorization": f"Bearer {OPENROUTER_API_KEY}",
59
+ "Content-Type": "application/json"
 
60
  }
61
  data = {
62
  "model": model,
 
80
  yield token
81
  except json.JSONDecodeError:
82
  pass
83
# --- Source Gathering Functions ---
def get_image_urls(query, max_images=6):
    """Return up to *max_images* image URLs for *query* via DuckDuckGo image search."""
    with DDGS() as search_client:
        results = search_client.images(query, max_results=max_images)
        return [result["image"] for result in results]
87
 
 
191
  pdf_output.seek(0)
192
  return pdf_output
193
 
194
+ # --- Streamlit UI Start ---
195
  st.set_page_config(page_title="🧠 Deep Research Assistant 4.0", layout="centered")
196
 
 
 
 
 
 
 
 
 
 
197
  # --- Sidebar ---
198
  with st.sidebar:
 
199
  st.markdown("## πŸ” Start New Research")
200
  topic = st.text_input("🧠 Topic")
201
  report_type = st.selectbox("πŸ“„ Report Type", ["Summary", "Detailed Report", "Thorough Academic Research"])
 
203
  source_type = st.selectbox("πŸ“š Sources", ["Web Only", "Academic Only", "Hybrid"])
204
  custom_domains = st.text_input("🌐 Optional Domains", placeholder="forbes.com, mit.edu")
205
  research_button = st.button("πŸš€ Run Deep Research", use_container_width=True)
 
 
206
 
 
207
  st.title("πŸŒ™ Deep Research Assistant 4.0")
208
  st.markdown("Where serious research meets serious style. πŸ§ πŸ’–")
209
  st.divider()
210
 
211
+ # --- Web Images Section ---
 
 
212
  if topic and research_button:
213
  st.subheader("πŸ–Ό Related Images from the Web")
214
  try:
 
295
 
296
  except Exception as e:
297
  st.error(f"❌ Error: {e}")
 
298
  # --- Chat Threads Section ---
299
  st.divider()
300
  st.subheader("πŸ“‚ Your Research Threads")
301
 
302
+ user_avatar = "https://cdn-icons-png.flaticon.com/512/9131/9131529.png"
303
+ assistant_avatar = "https://cdn-icons-png.flaticon.com/512/4712/4712107.png"
304
 
305
  for tid, chats in st.session_state.chat_threads.items():
306
  with st.container():
 
339
  pdf_file = download_threads_as_pdf(st.session_state.chat_threads)
340
  st.download_button("πŸ“₯ Download All Threads as PDF", data=pdf_file, file_name="Research_Threads.pdf", mime="application/pdf", use_container_width=True)
341
 
 
 
 
 
 
 
 
 
 
 
 
 
342
  # --- Methodology Recommender ---
343
  st.divider()
344
  st.subheader("πŸ§ͺ Methodology Recommender")
 
356
  method_output += chunk
357
  method_box.markdown(method_output, unsafe_allow_html=True)
358
 
 
359
  st.session_state["methodology_notes"] = method_output
360
 
361
  except Exception as e:
 
420
  else:
421
# Build the LLM prompt asking for constructive feedback on the uploaded paper.
# NOTE: the user-message text is a triple-quoted f-string; the inner literal
# \"\"\" sequences are escaped quotes INSIDE the string that visually fence the
# paper text for the model — they must not terminate the f-string itself.
feedback_prompt = [
    {"role": "system", "content": "You are an expert academic advisor."},
    {"role": "user", "content": f"""I have written the following research paper. Please analyze it and provide detailed suggestions on:
- Areas where the paper is weak or unclear
- How to improve the novelty or originality
- Structural improvements or better ways to present arguments

Be honest and constructive. Here's the full text:

\"\"\"{paper_text}\"\"\""""}
]
 
 
 
 
 
 
 
425
 
426
  with st.status("πŸ”Ž Analyzing your paper..."):
427
  improvement_output = ""
 
433
  except Exception as e:
434
  st.error(f"❌ Error while analyzing paper: {e}")
435
 
436
+ # --- Full Chat History Viewer ---
437
+ st.divider()
438
  with st.expander("πŸ“œ View Full Chat History", expanded=False):
439
  for msg in st.session_state.chat_history:
440
  role = msg["role"]