from __future__ import annotations
import re
from typing import Optional
import requests
from bs4 import BeautifulSoup
from ddgs import DDGS
USER_AGENT = "Mozilla/5.0 (compatible; HF-Benchmark-Agent/1.0)"
def web_search_first(query: str, max_results: int = 5) -> list[dict]:
    """Run a DuckDuckGo text search and return the raw result dicts.

    Best-effort: any failure during the search yields an empty list
    rather than propagating the exception.
    """
    try:
        with DDGS() as ddgs:
            return list(ddgs.text(query, max_results=max_results))
    except Exception:
        # Search backends are flaky; callers treat [] as "no results".
        return []
def fetch_url_text(url: str, max_chars: int = 12000) -> str:
    """Download *url* and return its visible text, truncated to *max_chars*.

    Best-effort: any network/HTTP error yields "" instead of raising.
    """
    request_headers = {"User-Agent": USER_AGENT}
    try:
        resp = requests.get(url, headers=request_headers, timeout=20)
        resp.raise_for_status()
    except Exception:
        return ""
    soup = BeautifulSoup(resp.text, "html.parser")
    # Strip elements that never contribute visible text.
    for invisible in soup(["script", "style", "noscript"]):
        invisible.decompose()
    raw_text = soup.get_text("\n")
    # Collapse runs of blank lines left behind by block elements.
    collapsed = re.sub(r"\n{2,}", "\n", raw_text)
    return collapsed[:max_chars].strip()
def search_and_fetch(query: str, max_results: int = 3, max_chars: int = 12000) -> str:
    """Search the web and return a formatted digest of the top results.

    Each result contributes its title, URL, search snippet, and a slice of
    the fetched page text; the per-page fetch budget splits *max_chars*
    evenly across *max_results* pages.
    """
    per_page_budget = max_chars // max(1, max_results)
    hits = web_search_first(query, max_results=max_results)
    sections: list[str] = []
    for hit in hits[:max_results]:
        title = hit.get("title", "")
        href = hit.get("href", "")
        snippet = hit.get("body", "")
        # Only fetch when the result actually carries a URL.
        page_text = fetch_url_text(href, max_chars=per_page_budget) if href else ""
        sections.append(
            "[TITLE]\n" + title
            + "\n[URL]\n" + href
            + "\n[SNIPPET]\n" + snippet
            + "\n[PAGE TEXT]\n" + page_text
        )
    return "\n\n".join(sections).strip()