"""
id: sitemap
title: Robots & Sitemaps
author: admin
description: Fetch robots.txt and discover/parse sitemap URLs.
version: 0.1.0
license: Proprietary
"""
|
|
| import urllib.request |
| import urllib.parse |
| import xml.etree.ElementTree as ET |
|
|
|
|
class Tools:
    def robots(self, base_url: str) -> dict:
        """Fetch /robots.txt for *base_url* and extract its Sitemap: directives.

        Args:
            base_url: Site root, e.g. "https://example.com" (trailing slash ok).

        Returns:
            On success: {"robots": <first 10000 chars of robots.txt>,
            "sitemaps": [absolute sitemap URLs]}.
            On any failure: {"error": <message>}.
        """
        base = base_url.rstrip("/")
        robots_url = base + "/robots.txt"
        try:
            # NOTE(review): URL is caller-supplied; if this tool is exposed to
            # untrusted input, consider an allow-list to limit SSRF exposure.
            # `with` closes the HTTP response (the original never closed it).
            with urllib.request.urlopen(robots_url, timeout=15) as resp:
                txt = resp.read().decode("utf-8", errors="replace")
            sitemaps = []
            for line in txt.splitlines():
                # Directive name is case-insensitive per the robots.txt convention.
                if line.lower().startswith("sitemap:"):
                    loc = line.split(":", 1)[1].strip()
                    if loc:
                        # urljoin leaves absolute URLs untouched and resolves
                        # the (non-conforming but seen in the wild) relative ones.
                        sitemaps.append(urllib.parse.urljoin(base + "/", loc))
            return {"robots": txt[:10000], "sitemaps": sitemaps}
        except Exception as e:
            return {"error": str(e)}

    def parse_sitemap(self, sitemap_url: str, max_urls: int = 500) -> dict:
        """Fetch and parse a sitemap (or sitemap index) XML document.

        Handles both <urlset> documents (page URLs) and <sitemapindex>
        documents (child sitemap URLs). The latter is what robots.txt usually
        advertises; the original implementation returned an empty list for it.

        Args:
            sitemap_url: Absolute URL of the sitemap XML.
            max_urls: Cap on the number of URLs returned (default 500).

        Returns:
            {"count": n, "urls": [...]} on success — for a sitemap index the
            URLs are the child sitemap locations; {"error": <message>} on failure.
        """
        try:
            with urllib.request.urlopen(sitemap_url, timeout=20) as resp:
                payload = resp.read()
            root = ET.fromstring(payload)
            ns = "{http://www.sitemaps.org/schemas/sitemap/0.9}"
            # <urlset> carries <url><loc>; <sitemapindex> carries <sitemap><loc>.
            entries = root.findall(".//" + ns + "url")
            if not entries:
                entries = root.findall(".//" + ns + "sitemap")
            urls = []
            for entry in entries:
                loc = entry.find(ns + "loc")
                if loc is not None and loc.text:
                    urls.append(loc.text.strip())
                    if len(urls) >= max_urls:
                        break
            return {"count": len(urls), "urls": urls}
        except Exception as e:
            return {"error": str(e)}
|
|