Spaces:
Sleeping
Sleeping
| from __future__ import annotations | |
| import re | |
| from typing import Optional | |
| import requests | |
| from bs4 import BeautifulSoup | |
| from ddgs import DDGS | |
| USER_AGENT = "Mozilla/5.0 (compatible; HF-Benchmark-Agent/1.0)" | |
def web_search_first(query: str, max_results: int = 5) -> list[dict]:
    """Run a DuckDuckGo text search and return the raw result dicts.

    Each dict comes straight from ``DDGS.text`` (typically with ``title``,
    ``href`` and ``body`` keys — confirm against the installed ddgs version).

    Args:
        query: Search query string.
        max_results: Maximum number of results to request.

    Returns:
        Up to ``max_results`` result dicts. On any search/network failure,
        whatever was collected before the failure (possibly empty) — the
        original returned ``[]`` in that case, throwing away partial results.
    """
    results: list[dict] = []
    try:
        with DDGS() as ddgs:
            for r in ddgs.text(query, max_results=max_results):
                results.append(r)
    except Exception:
        # Best-effort: a mid-stream failure should not discard results
        # already received, and must never crash the caller.
        return results
    return results
def fetch_url_text(url: str, max_chars: int = 12000) -> str:
    """Download *url* and return its visible text, truncated to *max_chars*.

    Non-visible markup (scripts, styles, noscript fallbacks) is stripped and
    runs of blank lines are collapsed to single newlines.

    Args:
        url: Page to fetch.
        max_chars: Hard cap on the returned text length.

    Returns:
        Extracted page text, or "" on any network/HTTP error (best-effort).
    """
    try:
        response = requests.get(
            url, headers={"User-Agent": USER_AGENT}, timeout=20
        )
        response.raise_for_status()
    except Exception:
        # Best-effort fetch: any failure (DNS, timeout, 4xx/5xx) yields "".
        return ""

    soup = BeautifulSoup(response.text, "html.parser")
    # Remove content that never renders as visible text.
    for element in soup(["script", "style", "noscript"]):
        element.decompose()

    extracted = soup.get_text("\n")
    collapsed = re.sub(r"\n{2,}", "\n", extracted)
    return collapsed[:max_chars].strip()
def search_and_fetch(query: str, max_results: int = 3, max_chars: int = 12000) -> str:
    """Search the web for *query* and return a formatted digest of top hits.

    Each hit is rendered as TITLE / URL / SNIPPET / PAGE TEXT sections, with
    the overall *max_chars* page-text budget split evenly across results.

    Args:
        query: Search query string.
        max_results: Number of results to include.
        max_chars: Total character budget for fetched page text.

    Returns:
        The rendered sections joined by blank lines; "" when the search
        produced no results.
    """
    # Each fetched page gets an equal slice of the total text budget.
    per_page_budget = max_chars // max(1, max_results)

    sections: list[str] = []
    for result in web_search_first(query, max_results=max_results)[:max_results]:
        title = result.get("title", "")
        href = result.get("href", "")
        body = result.get("body", "")
        page_text = fetch_url_text(href, max_chars=per_page_budget) if href else ""
        sections.append(
            f"[TITLE]\n{title}\n[URL]\n{href}\n[SNIPPET]\n{body}\n[PAGE TEXT]\n{page_text}"
        )
    return "\n\n".join(sections).strip()