Update app.py
app.py CHANGED
@@ -4,9 +4,70 @@ import requests
 import pytz
 import yaml
 from tools.final_answer import FinalAnswerTool
+import time, json
+from bs4 import BeautifulSoup
+from urllib.parse import urljoin, urlparse
+import urllib.robotparser as robotparser
 
 from Gradio_UI import GradioUI
 
+def allowed_by_robots(url: str, user_agent: str = DEFAULT_HEADERS["User-Agent"]) -> bool:
+    try:
+        parsed = urlparse(url)
+        robots_url = f"{parsed.scheme}://{parsed.netloc}/robots.txt"
+        rp = robotparser.RobotFileParser()
+        rp.set_url(robots_url)
+        rp.read()
+        return rp.can_fetch(user_agent, url)
+    except Exception:
+        # If robots.txt can't be read, fall back to allowing the fetch
+        return True
+
+@tool
+def scrape_html(url: str, selector: str, attr: str = "text", max_items: int = 50, delay_s: float = 0.0) -> str:
+    """Scrape elements from a web page using a CSS selector.
+    Args:
+        url: Full URL of the page to scrape.
+        selector: CSS selector for the elements to extract (e.g., 'h2 a', 'div.price').
+        attr: What to return for each element: 'text' for text content, or an attribute name like 'href'/'src'.
+        max_items: Maximum number of elements to return.
+        delay_s: Optional polite delay (seconds) before the request.
+    Returns:
+        A JSON string of the form {"url": ..., "count": N, "items": [...]}
+    """
+    try:
+        if delay_s > 0:
+            time.sleep(delay_s)
+
+        if not allowed_by_robots(url, DEFAULT_HEADERS["User-Agent"]):
+            return json.dumps({"error": f"Blocked by robots.txt for {url}"})
+
+        resp = requests.get(url, headers=DEFAULT_HEADERS, timeout=15)
+        resp.raise_for_status()
+
+        soup = BeautifulSoup(resp.text, "html.parser")
+        elements = soup.select(selector)
+        items = []
+
+        for el in elements[:max_items]:
+            if attr == "text":
+                items.append(el.get_text(strip=True))
+            else:
+                # Resolve relative URLs for href/src attributes
+                val = el.get(attr)
+                if val and attr in ("href", "src"):
+                    val = urljoin(url, val)
+                items.append(val)
+
+        return json.dumps({"url": url, "count": len(items), "items": items}, ensure_ascii=False)
+    except requests.exceptions.RequestException as e:
+        return json.dumps({"error": f"Network error: {e}"})
+    except Exception as e:
+        return json.dumps({"error": f"Parse error: {e}"})
+
+
+
+
 # Below is an example of a tool that does nothing. Amaze us with your creativity !
 @tool
 def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
@@ -55,7 +116,7 @@ with open("prompts.yaml", 'r') as stream:
 
 agent = CodeAgent(
     model=model,
-    tools=[final_answer], ## add your tools here (don't remove final answer)
+    tools=[final_answer, scrape_html], ## add your tools here (don't remove final answer)
     max_steps=6,
     verbosity_level=1,
     grammar=None,
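
The new code references DEFAULT_HEADERS, which is not defined in the hunks shown; it presumably already exists near the top of app.py. A minimal sketch of what the tool needs from it, with a placeholder User-Agent string that is not taken from the diff:

# Assumed to exist elsewhere in app.py; the User-Agent value is a placeholder.
DEFAULT_HEADERS = {
    "User-Agent": "my-agent-space/0.1 (+https://example.com/contact)"
}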
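
Since @tool wraps scrape_html into a smolagents Tool, it should be callable directly for a smoke test before handing it to the agent, assuming smolagents Tool objects remain callable. The URL and selector below are illustrative, not from the diff:

# Illustrative direct call; the URL and selector are placeholders.
result = scrape_html(
    url="https://example.com/news",
    selector="h2 a",
    attr="href",     # return resolved link targets instead of element text
    max_items=10,
    delay_s=1.0,     # polite delay before the request
)
print(result)  # '{"url": ..., "count": N, "items": [...]}'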
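
With scrape_html registered next to final_answer, the agent can route scraping requests to it; the rest of the template is presumably unchanged and still finishes by launching GradioUI(agent). A hypothetical prompt, not from the diff:

# Hypothetical invocation; the prompt text is only an example.
agent.run("List the article links from https://example.com/news")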