"""
Web-related tools for the GAIA Agent.
Includes web search, Wikipedia lookup, arXiv search, and webpage fetching.
"""

import os
import re

import requests
from langchain_core.tools import tool
from tavily import TavilyClient
import wikipedia
import arxiv
from dotenv import load_dotenv

load_dotenv()
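
# Note: web_search requires a TAVILY_API_KEY environment variable; load_dotenv()
# above picks it up from a local .env file if one is present.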


@tool
def web_search(query: str, include_content: bool = True, max_results: int = 5) -> str:
    """Search the web for current information with full page content.

    Use this for facts, news, people, places, events, or anything you need to look up.
    Returns search results WITH full page content, so you can get detailed information directly.

    Args:
        query: The search query
        include_content: If True, includes full page content (default: True)
        max_results: Number of results to return (default: 5)
    """
    try:
        client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
        response = client.search(
            query,
            max_results=max_results,
            include_raw_content=include_content,  # Get full page content
            include_answer=True,  # Get a direct answer if available
        )
        output = []
        # Include Tavily's direct answer if available
        if response.get("answer"):
            output.append(f"📌 DIRECT ANSWER: {response['answer']}")
            output.append("=" * 50)
        results = response.get("results", [])
        if not results:
            return "No search results found."
        for i, r in enumerate(results, 1):
            output.append(f"\n[{i}] {r.get('title', 'N/A')}")
            output.append(f"URL: {r.get('url', 'N/A')}")
            output.append(f"Snippet: {r.get('content', 'N/A')}")
            # Include full page content if available
            raw_content = r.get('raw_content')
            if raw_content:
                # Truncate to a reasonable length per result
                content_preview = raw_content[:3000]
                if len(raw_content) > 3000:
                    content_preview += "\n...[content truncated]"
                output.append(f"\nFull Content:\n{content_preview}")
            output.append("-" * 40)
        return "\n".join(output)
    except Exception as e:
        return f"Search error: {str(e)}"


@tool
def wikipedia_lookup(topic: str) -> str:
    """Look up a topic on Wikipedia for detailed encyclopedic information.

    Args:
        topic: The topic to look up
    """
    try:
        search_results = wikipedia.search(topic, results=3)
        if not search_results:
            return f"No Wikipedia article found for: {topic}"
        try:
            page = wikipedia.page(search_results[0], auto_suggest=False)
            return f"Title: {page.title}\n\nSummary:\n{page.summary[:4000]}"
        except wikipedia.DisambiguationError as e:
            if e.options:
                page = wikipedia.page(e.options[0], auto_suggest=False)
                return f"Title: {page.title}\n\nSummary:\n{page.summary[:4000]}"
            return f"Multiple matches found: {e.options[:5]}"
        except wikipedia.PageError:
            return f"Page not found: {search_results[0]}"
    except Exception as e:
        return f"Wikipedia error: {str(e)}"


@tool
def arxiv_search(query: str, max_results: int = 5) -> str:
    """Search arXiv for academic papers and research articles.

    Use this for scientific papers, research, preprints, and academic publications.
    Returns paper titles, authors, abstracts, and arXiv IDs.

    Args:
        query: Search query (can include author names, titles, or topics)
        max_results: Maximum number of results to return (default: 5)
    """
    try:
        # Create arXiv client and search
        client = arxiv.Client()
        search = arxiv.Search(
            query=query,
            max_results=max_results,
            sort_by=arxiv.SortCriterion.Relevance,
        )
        results = list(client.results(search))
        if not results:
            return f"No arXiv papers found for: {query}"
        output = []
        for i, paper in enumerate(results, 1):
            output.append(f"[{i}] {paper.title}")
            output.append(f" Authors: {', '.join(a.name for a in paper.authors[:5])}")
            if len(paper.authors) > 5:
                output[-1] += f" et al. ({len(paper.authors)} total)"
            output.append(f" Published: {paper.published.strftime('%Y-%m-%d')}")
            output.append(f" arXiv ID: {paper.entry_id.split('/')[-1]}")
            output.append(f" Categories: {', '.join(paper.categories[:3])}")
            output.append(f" PDF: {paper.pdf_url}")
            # Truncate abstract to ~500 chars
            abstract = paper.summary.replace('\n', ' ')[:500]
            if len(paper.summary) > 500:
                abstract += "..."
            output.append(f" Abstract: {abstract}")
            output.append("---")
        return "\n".join(output)
    except Exception as e:
        return f"arXiv search error: {str(e)}"


@tool
def webpage_fetch(url: str, extract_links: bool = False) -> str:
    """Fetch and read the content of a webpage URL.

    Use this to read the full content of a page from search results.
    After web_search returns URLs, use this tool to get detailed information.

    Args:
        url: The URL to fetch (http or https)
        extract_links: If True, also extract and list links found on the page
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        html = response.text
        # Try to use BeautifulSoup for better parsing
        try:
            from bs4 import BeautifulSoup
            soup = BeautifulSoup(html, 'html.parser')
            # Remove script and style elements
            for element in soup(['script', 'style', 'nav', 'footer', 'header', 'aside']):
                element.decompose()
            # Get title
            title = soup.title.string if soup.title else "No title"
            # Get main text content
            text = soup.get_text(separator='\n', strip=True)
            # Clean up excessive whitespace
            lines = [line.strip() for line in text.splitlines() if line.strip()]
            text = '\n'.join(lines)
            # Extract links if requested
            links_text = ""
            if extract_links:
                links = []
                for a in soup.find_all('a', href=True)[:20]:  # Limit to 20 links
                    href = a['href']
                    link_text = a.get_text(strip=True)[:50]
                    if href.startswith('http'):
                        links.append(f" - {link_text}: {href}")
                if links:
                    links_text = "\n\nLinks found:\n" + "\n".join(links)
            # Truncate to reasonable length
            if len(text) > 8000:
                text = text[:8000] + "\n...[truncated]"
            return f"Title: {title}\nURL: {url}\n\nContent:\n{text}{links_text}"
        except ImportError:
            # Fallback: basic HTML tag stripping without BeautifulSoup
            # Remove script and style content
            html = re.sub(r'<script[^>]*>.*?</script>', '', html, flags=re.DOTALL | re.IGNORECASE)
            html = re.sub(r'<style[^>]*>.*?</style>', '', html, flags=re.DOTALL | re.IGNORECASE)
            # Remove HTML tags
            text = re.sub(r'<[^>]+>', ' ', html)
            # Decode HTML entities
            import html as html_module
            text = html_module.unescape(text)
            # Clean up whitespace
            text = re.sub(r'\s+', ' ', text).strip()
            # Truncate
            if len(text) > 8000:
                text = text[:8000] + "...[truncated]"
            return f"URL: {url}\n\nContent:\n{text}\n\n(Note: Install beautifulsoup4 for better parsing: pip install beautifulsoup4)"
    except requests.exceptions.Timeout:
        return f"Error: Request timed out for URL: {url}"
    except requests.exceptions.HTTPError as e:
        return f"HTTP Error {e.response.status_code}: Could not fetch {url}"
    except Exception as e:
        return f"Error fetching webpage: {str(e)}"