Spaces:
Sleeping
Sleeping
Update app.py
#2
by
VishnuCodes
- opened
app.py
CHANGED
|
@@ -2,6 +2,7 @@ import os
|
|
| 2 |
import streamlit as st
|
| 3 |
import requests
|
| 4 |
import feedparser
|
|
|
|
| 5 |
from dotenv import load_dotenv
|
| 6 |
from duckduckgo_search import DDGS
|
| 7 |
|
|
@@ -67,6 +68,18 @@ def get_image_urls(query, max_images=3):
|
|
| 67 |
with DDGS() as ddgs:
|
| 68 |
return [img["image"] for img in ddgs.images(query, max_results=max_images)]
|
| 69 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 70 |
# --- Research Agent ---
|
| 71 |
def autonomous_research_agent(topic):
|
| 72 |
arxiv = get_arxiv_papers(topic)
|
|
@@ -74,17 +87,20 @@ def autonomous_research_agent(topic):
|
|
| 74 |
web = search_duckduckgo(topic)
|
| 75 |
images = get_image_urls(topic)
|
| 76 |
|
| 77 |
-
arxiv_md = ""
|
| 78 |
for p in arxiv:
|
| 79 |
arxiv_md += f"- [{p['title']}]({p['url']})\n> {p['summary'][:300]}...\n\n"
|
|
|
|
| 80 |
|
| 81 |
-
scholar_md = ""
|
| 82 |
for p in scholar:
|
| 83 |
scholar_md += f"- [{p['title']}]({p['url']})\n> {p['summary'][:300]}...\n\n"
|
|
|
|
| 84 |
|
| 85 |
-
web_md = ""
|
| 86 |
for w in web:
|
| 87 |
web_md += f"- [{w['title']}]({w['url']})\n> {w['snippet']}\n\n"
|
|
|
|
| 88 |
|
| 89 |
prompt = f"""
|
| 90 |
# Research Topic: {topic}
|
|
@@ -115,6 +131,12 @@ Now synthesize this information into:
|
|
| 115 |
if web_md:
|
| 116 |
response += "**Web:**\n" + web_md
|
| 117 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 118 |
return response, images
|
| 119 |
|
| 120 |
# --- Streamlit UI ---
|
|
|
|
| 2 |
import streamlit as st
|
| 3 |
import requests
|
| 4 |
import feedparser
|
| 5 |
+
import datetime
|
| 6 |
from dotenv import load_dotenv
|
| 7 |
from duckduckgo_search import DDGS
|
| 8 |
|
|
|
|
| 68 |
with DDGS() as ddgs:
|
| 69 |
return [img["image"] for img in ddgs.images(query, max_results=max_images)]
|
| 70 |
|
| 71 |
+
def generate_apa_citation(title, url, source=""):
    """Return an APA-style citation line for a research reference.

    The publication year is approximated with the current year (the
    upstream search results carry no date information).

    Args:
        title: Title of the paper or page.
        url: Link to the reference.
        source: Origin tag — "arxiv", "semantic", or "web"; any other
            value yields a citation without a source label.

    Returns:
        A markdown-formatted citation string.
    """
    year = datetime.datetime.now().year
    # Dispatch table replaces the if/elif chain; each label carries its
    # own trailing period so the assembled string matches exactly.
    source_labels = {
        "arxiv": "*arXiv*.",
        "semantic": "*Semantic Scholar*.",
        "web": "*Web Source*.",
    }
    label = source_labels.get(source)
    if label is None:
        return f"{title}. ({year}). {url}"
    return f"{title}. ({year}). {label} {url}"
|
| 81 |
+
|
| 82 |
+
|
| 83 |
# --- Research Agent ---
|
| 84 |
def autonomous_research_agent(topic):
|
| 85 |
arxiv = get_arxiv_papers(topic)
|
|
|
|
| 87 |
web = search_duckduckgo(topic)
|
| 88 |
images = get_image_urls(topic)
|
| 89 |
|
| 90 |
+
arxiv_md, arxiv_citations = "", []
|
| 91 |
for p in arxiv:
|
| 92 |
arxiv_md += f"- [{p['title']}]({p['url']})\n> {p['summary'][:300]}...\n\n"
|
| 93 |
+
arxiv_citations.append(generate_apa_citation(p["title"], p["url"], source="arxiv"))
|
| 94 |
|
| 95 |
+
scholar_md, scholar_citations = "", []
|
| 96 |
for p in scholar:
|
| 97 |
scholar_md += f"- [{p['title']}]({p['url']})\n> {p['summary'][:300]}...\n\n"
|
| 98 |
+
scholar_citations.append(generate_apa_citation(p["title"], p["url"], source="semantic"))
|
| 99 |
|
| 100 |
+
web_md, web_citations = "", []
|
| 101 |
for w in web:
|
| 102 |
web_md += f"- [{w['title']}]({w['url']})\n> {w['snippet']}\n\n"
|
| 103 |
+
web_citations.append(generate_apa_citation(w["title"], w["url"], source="web"))
|
| 104 |
|
| 105 |
prompt = f"""
|
| 106 |
# Research Topic: {topic}
|
|
|
|
| 131 |
if web_md:
|
| 132 |
response += "**Web:**\n" + web_md
|
| 133 |
|
| 134 |
+
# APA Citations Section
|
| 135 |
+
all_citations = arxiv_citations + scholar_citations + web_citations
|
| 136 |
+
response += "\n---\n### π APA Citations\n"
|
| 137 |
+
for cite in all_citations:
|
| 138 |
+
response += f"- {cite}\n"
|
| 139 |
+
|
| 140 |
return response, images
|
| 141 |
|
| 142 |
# --- Streamlit UI ---
|