Update app.py
app.py CHANGED

import os
import streamlit as st
import requests
import feedparser
from dotenv import load_dotenv
from duckduckgo_search import DDGS

# Load API key from .env
load_dotenv()
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")

# --- LLM Function ---
def call_llm(messages, model="deepseek/deepseek-chat-v3-0324:free", max_tokens=2048, temperature=0.7):
    url = "https://openrouter.ai/api/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json",
        "X-Title": "Autonomous Research Assistant"
    }
    data = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature
    }
    try:
        response = requests.post(url, headers=headers, json=data)
        result = response.json()
    except Exception as e:
        raise RuntimeError(f"Failed to connect or parse response: {e}")
    if response.status_code != 200:
        error = result.get("error", {}).get("message", response.text)
        raise RuntimeError(f"OpenRouter API Error: {error}")
    if "choices" not in result:
        raise RuntimeError(f"Invalid response: {result}")
    return result["choices"][0]["message"]["content"]
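
# Illustrative usage (a sketch, not part of the commit):
#   reply = call_llm([{"role": "user", "content": "Summarize recent LLM papers"}])
# returns the assistant's text, or raises RuntimeError on connection/API errors.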

# --- Source Fetching Functions ---
def get_arxiv_papers(query, max_results=3):
    from urllib.parse import quote_plus
    url = f"http://export.arxiv.org/api/query?search_query=all:{quote_plus(query)}&start=0&max_results={max_results}"
    feed = feedparser.parse(url)
    return [{
        "title": e.title,
        "summary": e.summary.strip().replace("\n", " "),
        "url": next((l.href for l in e.links if l.type == "application/pdf"), "")
    } for e in feed.entries]

def get_semantic_scholar_papers(query, max_results=3):
    url = "https://api.semanticscholar.org/graph/v1/paper/search"
    params = {"query": query, "limit": max_results, "fields": "title,abstract,url"}
    response = requests.get(url, params=params)
    papers = response.json().get("data", [])
    return [{
        "title": p.get("title", ""),
        # guard against null abstracts, which Semantic Scholar returns frequently
        "summary": (p.get("abstract") or "No abstract").strip(),
        "url": p.get("url", "")
    } for p in papers]

def search_duckduckgo(query, max_results=3):
    with DDGS() as ddgs:
        return [{
            "title": r["title"],
            "snippet": r["body"],
            "url": r["href"]
        } for r in ddgs.text(query, max_results=max_results)]

def get_image_url(query):
    with DDGS() as ddgs:
        for r in ddgs.images(query, max_results=1):
            return r["image"]
    return None
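
# Note: the three search helpers return dicts sharing "title"/"url" plus a text
# field ("summary" or "snippet"); get_image_url returns a single image URL or None.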

# --- Autonomous Agent ---
def autonomous_research_agent(topic):
    arxiv = get_arxiv_papers(topic)
    scholar = get_semantic_scholar_papers(topic)
    web = search_duckduckgo(topic)
    image = get_image_url(topic)

    prompt = f"# Research Topic: {topic}\n\n"

    if image:
        # embed the fetched image as markdown (alt text here is an assumption;
        # the original image line was lost in the page rendering)
        prompt += f"![{topic}]({image})\n\n"

    prompt += "## ArXiv:\n"
    for p in arxiv:
        prompt += f"- [{p['title']}]({p['url']})\n> {p['summary'][:300]}...\n\n"

    prompt += "## Semantic Scholar:\n"
    for p in scholar:
        prompt += f"- [{p['title']}]({p['url']})\n> {p['summary'][:300]}...\n\n"

    prompt += "## Web Insights:\n"
    for w in web:
        prompt += f"- [{w['title']}]({w['url']})\n> {w['snippet']}\n\n"

    prompt += (
        "Now, based on the above sources:\n"
        "1. Identify overlapping research themes\n"
        "2. Highlight a gap or opportunity\n"
        "3. Propose a novel research direction\n"
        "4. Write a full academic-style narrative in markdown (no section labels)\n"
    )

    return call_llm([{"role": "user", "content": prompt}], max_tokens=3000)
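
# The assembled prompt is one markdown document (topic heading, optional image,
# three source sections, then the four instructions) sent as a single user turn;
# max_tokens=3000 leaves room for the long-form answer.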

# --- Streamlit App ---
st.set_page_config("Autonomous Research Agent", layout="wide")
st.title("🤖 Autonomous AI Research Assistant")

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

topic = st.text_input("Enter a research topic:")
if st.button("Run Agent"):
    with st.spinner("Gathering sources & thinking..."):
        try:
            response = autonomous_research_agent(topic)
            st.session_state.chat_history.append({"role": "user", "content": topic})
            st.session_state.chat_history.append({"role": "assistant", "content": response})
            st.markdown(response)
        except Exception as e:
            st.error(f"Failed: {e}")
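
# Both handlers append to st.session_state.chat_history, so follow-up questions
# below are answered with the generated report as conversational context.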

# --- Follow-up Chat ---
st.divider()
st.subheader("💬 Follow-up Questions")
followup = st.text_input("Ask something about the research:")
if st.button("Ask"):
    if followup:
        chat = st.session_state.chat_history + [{"role": "user", "content": followup}]
        with st.spinner("Answering..."):
            try:
                answer = call_llm(chat, max_tokens=1500)
                st.session_state.chat_history.append({"role": "user", "content": followup})
                st.session_state.chat_history.append({"role": "assistant", "content": answer})
                st.markdown(answer)
            except Exception as e:
                st.error(f"Follow-up error: {e}")
|