Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -19,18 +19,25 @@ from groq import Groq
|
|
| 19 |
# Base URL of the Agents-course Unit 4 scoring service — presumably the
# questions/submission API this app talks to; TODO confirm against callers.
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
| 20 |
|
| 21 |
# ---------- Tools ----------
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia for a given query and return content from up to 2 relevant pages.

    Args:
        query: Free-text search string.

    Returns:
        The pages' contents joined by blank lines ("" when nothing is found).
    """
    # Fix: langchain's @tool raises at import time when the wrapped function
    # has neither a docstring nor an explicit description — the docstring
    # above doubles as the tool's description shown to the model.
    docs = WikipediaLoader(query=query, load_max_docs=2).load()
    return "\n\n".join(doc.page_content for doc in docs)
|
| 26 |
|
| 27 |
@tool
def web_search(query: str) -> str:
    """Search the web using the Tavily API and return content from up to 3 search results.

    Args:
        query: Free-text search string.

    Returns:
        Result snippets joined by blank lines ("" when there are no results).
    """
    # Fix 1: @tool requires a description — the docstring provides it.
    # Fix 2: TavilySearchResults.invoke returns a list of result dicts
    # ({"url": ..., "content": ...}), not Document objects, so the original
    # `doc.page_content` raised AttributeError on every successful search.
    results = TavilySearchResults(max_results=3).invoke(query)
    if isinstance(results, str):
        # Tavily returns a plain error string on failure — pass it through.
        return results
    return "\n\n".join(r.get("content", "") for r in results)
|
| 31 |
|
| 32 |
@tool
def arvix_search(query: str) -> str:
    """Search academic papers on arXiv for a given query and return up to 3 result summaries.

    Args:
        query: Free-text search string.

    Returns:
        The first 1000 characters of each matching paper, joined by blank lines.
    """
    # Fix: @tool raises at import time without a docstring/description.
    # NOTE(review): "arvix" is a typo for "arxiv", but the function name is
    # the tool's public identifier — renaming would break registrations.
    docs = ArxivLoader(query=query, load_max_docs=3).load()
    # Truncate each paper to keep the tool output within a sane context size.
    return "\n\n".join(doc.page_content[:1000] for doc in docs)
|
| 36 |
|
|
|
|
| 19 |
# Base URL of the Agents-course Unit 4 scoring service — presumably the
# questions/submission API this app talks to; TODO confirm against callers.
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
| 20 |
|
| 21 |
# ---------- Tools ----------
|
| 22 |
+
from langchain_core.tools import tool
|
| 23 |
+
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
|
| 24 |
+
from langchain_community.tools.tavily_search import TavilySearchResults
|
| 25 |
+
|
| 26 |
@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia for a given query and return content from up to 2 relevant pages."""
    # Load at most two matching pages and concatenate their raw text,
    # separating pages with a blank line.
    loader = WikipediaLoader(query=query, load_max_docs=2)
    pages = [page.page_content for page in loader.load()]
    return "\n\n".join(pages)
|
| 31 |
|
| 32 |
@tool
def web_search(query: str) -> str:
    """Search the web using the Tavily API and return content from up to 3 search results."""
    # Bug fix: TavilySearchResults.invoke returns a list of result dicts
    # ({"url": ..., "content": ...}), not Document objects, so the original
    # `doc.page_content` raised AttributeError on every successful search.
    results = TavilySearchResults(max_results=3).invoke(query)
    if isinstance(results, str):
        # Tavily returns a plain error string on failure — pass it through.
        return results
    return "\n\n".join(r.get("content", "") for r in results)
|
| 37 |
|
| 38 |
@tool
def arvix_search(query: str) -> str:
    """Search academic papers on Arxiv for a given query and return up to 3 result summaries."""
    # NOTE(review): "arvix" is a typo for "arxiv"; the name is kept because it
    # is the tool's public identifier.
    snippets = []
    for paper in ArxivLoader(query=query, load_max_docs=3).load():
        # Cap each paper's text at 1000 characters before joining.
        snippets.append(paper.page_content[:1000])
    return "\n\n".join(snippets)
|
| 43 |
|