Ani14 committed on
Commit
ff83913
·
verified ·
1 Parent(s): 06e1e90

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -57
app.py CHANGED
@@ -22,66 +22,21 @@ def call_llm(messages, model="deepseek/deepseek-chat-v3-0324:free", max_tokens=2
22
  "max_tokens": max_tokens,
23
  "temperature": temperature
24
  }
25
- response = requests.post(url, headers=headers, json=data)
26
- result = response.json()
27
- if "choices" not in result:
28
- raise RuntimeError(f"LLM returned invalid response: {result}")
29
- return result["choices"][0]["message"]["content"]
30
-
31
- # --- Research Source Functions ---
32
def get_arxiv_papers(query, max_results=3):
    """Fetch up to *max_results* papers from the arXiv Atom API for *query*.

    Args:
        query: Free-text search string; applied to all arXiv fields.
        max_results: Maximum number of entries to request.

    Returns:
        A list of dicts with keys "title", "summary" (truncated to 300
        characters), and "url" (PDF link, or "" when the entry has none).
    """
    from urllib.parse import quote_plus
    url = (
        "http://export.arxiv.org/api/query"
        f"?search_query=all:{quote_plus(query)}&start=0&max_results={max_results}"
    )
    feed = feedparser.parse(url)
    papers = []
    for entry in feed.entries:
        # Some Atom entries carry links without a `type` attribute; the
        # original `link.type` access raised AttributeError on those and
        # aborted the whole fetch. getattr with a default skips them safely.
        pdf = next(
            (
                link.href
                for link in entry.links
                if getattr(link, "type", "") == "application/pdf"
            ),
            "",
        )
        papers.append({
            "title": entry.title,
            "summary": entry.summary[:300],
            "url": pdf,
        })
    return papers
41
-
42
def get_semantic_scholar_papers(query, max_results=3):
    """Search the Semantic Scholar Graph API for *query*.

    Args:
        query: Free-text search string.
        max_results: Maximum number of papers to request (API `limit`).

    Returns:
        A list of dicts with keys "title", "summary" (abstract truncated to
        300 characters, "N/A" when the paper has no abstract), and "url".
    """
    url = "https://api.semanticscholar.org/graph/v1/paper/search"
    params = {"query": query, "limit": max_results, "fields": "title,abstract,url"}
    # timeout so a stalled API call can't hang the Streamlit app forever.
    response = requests.get(url, params=params, timeout=30)
    results = response.json().get("data", [])
    # The API frequently returns "abstract": null; the original
    # `p.get("abstract", "N/A")` then handed None to the slice and raised
    # TypeError. `or "N/A"` coalesces both missing and null abstracts.
    return [
        {
            "title": p["title"],
            "summary": (p.get("abstract") or "N/A")[:300],
            "url": p.get("url", ""),
        }
        for p in results
    ]
48
-
49
def search_duckduckgo_snippets(query, max_results=3):
    """Run a DuckDuckGo text search for *query*.

    Returns a list of dicts with keys "title", "snippet", and "url",
    one per search hit, at most *max_results* entries.
    """
    hits = []
    with DDGS() as ddgs:
        for hit in ddgs.text(query, max_results=max_results):
            hits.append(
                {"title": hit["title"], "snippet": hit["body"], "url": hit["href"]}
            )
    return hits
55
-
56
def get_image_urls(query, max_images=1):
    """Return up to *max_images* image URLs from a DuckDuckGo image search."""
    with DDGS() as ddgs:
        results = ddgs.images(query, max_results=max_images)
        return [item["image"] for item in results]
59
-
60
- # --- Research Agent ---
61
def autonomous_research_agent(topic):
    """Gather sources on *topic* and ask the LLM for a synthesis.

    Pulls papers from arXiv and Semantic Scholar, web snippets and an
    optional image from DuckDuckGo, assembles them into a markdown prompt,
    and returns the LLM's synthesized research gap / direction / narrative.
    """
    arxiv_papers = get_arxiv_papers(topic)
    scholar_papers = get_semantic_scholar_papers(topic)
    web_hits = search_duckduckgo_snippets(topic)
    image_urls = get_image_urls(topic)

    def paper_lines(papers):
        # Shared markdown item format for the two paper sources.
        return "\n".join(
            f"- [{p['title']}]({p['url']})\n> {p['summary']}..." for p in papers
        )

    pieces = [f"Topic: {topic}\n\n"]
    if image_urls:
        pieces.append(f"![Related Image]({image_urls[0]})\n\n")

    pieces.append("## ArXiv:\n" + paper_lines(arxiv_papers) + "\n\n")
    pieces.append("## Semantic Scholar:\n" + paper_lines(scholar_papers) + "\n\n")

    web_lines = "\n".join(
        f"- [{w['title']}]({w['url']})\n> {w['snippet']}" for w in web_hits
    )
    pieces.append("## Web:\n" + web_lines + "\n\n")

    pieces.append(
        "Now synthesize all this into:\n"
        "1. Research gap\n"
        "2. Proposed research direction\n"
        "3. A full academic narrative (markdown format, formal tone)"
    )

    prompt = "".join(pieces)
    return call_llm([{"role": "user", "content": prompt}], max_tokens=3000)
 
 
84
 
 
 
 
 
85
  # --- Streamlit UI ---
86
  st.set_page_config("Autonomous Research Agent", layout="wide")
87
  st.title("🤖 Autonomous AI Research Assistant")
 
22
  "max_tokens": max_tokens,
23
  "temperature": temperature
24
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
+ try:
27
+ response = requests.post(url, headers=headers, json=data)
28
+ result = response.json()
29
+ except Exception as e:
30
+ raise RuntimeError(f"Failed to get or parse LLM response: {e}")
 
31
 
32
+ if response.status_code != 200:
33
+ err_msg = result.get("error", {}).get("message", "Unknown API error")
34
+ raise RuntimeError(f"OpenRouter API returned error: {err_msg}")
35
 
36
+ if "choices" not in result or not result["choices"]:
37
+ raise RuntimeError(f"Invalid response structure from OpenRouter: {result}")
38
+
39
+ return result["choices"][0]["message"]["content"]
40
  # --- Streamlit UI ---
41
  st.set_page_config("Autonomous Research Agent", layout="wide")
42
  st.title("🤖 Autonomous AI Research Assistant")