|
|
""" |
|
|
id: scraper |
|
|
title: HTML Scraper |
|
|
author: admin |
|
|
description: Extract title, text, and links from an HTML page; accepts url or raw html. |
|
|
version: 0.1.0 |
|
|
license: Proprietary |
|
|
""" |
|
|
|
|
|
import re |
|
|
import html |
|
|
|
|
|
# Optional third-party dependency: BeautifulSoup gives higher-quality
# extraction; when it cannot be imported, Tools falls back to a
# stdlib-only regex-based parser (see Tools.extract).
try:
    from bs4 import BeautifulSoup
except Exception:  # any import failure (missing package, broken install) means "unavailable"
    BeautifulSoup = None
|
|
|
|
|
|
|
|
class Tools: |
|
|
def extract(self, html_text: str = "", url: str = "") -> dict: |
|
|
text = html_text |
|
|
if not text and url: |
|
|
import urllib.request |
|
|
|
|
|
text = ( |
|
|
urllib.request.urlopen(url, timeout=20) |
|
|
.read() |
|
|
.decode("utf-8", errors="replace") |
|
|
) |
|
|
if not text: |
|
|
return {"error": "no html or url provided"} |
|
|
if BeautifulSoup: |
|
|
soup = BeautifulSoup(text, "html.parser") |
|
|
title = ( |
|
|
soup.title.string.strip() if soup.title and soup.title.string else "" |
|
|
) |
|
|
body = soup.get_text(" ", strip=True) |
|
|
links = [] |
|
|
for a in soup.find_all("a", href=True): |
|
|
href = urllib.parse.urljoin(url, a["href"]) if url else a["href"] |
|
|
links.append({"href": href, "text": a.get_text(strip=True)}) |
|
|
return {"title": title, "text": body[:20000], "links": links[:200]} |
|
|
|
|
|
title = "" |
|
|
m = re.search(r"<title[^>]*>(.*?)</title>", text, re.I | re.S) |
|
|
if m: |
|
|
title = html.unescape(re.sub("<[^<]+?>", "", m.group(1))).strip() |
|
|
body = re.sub( |
|
|
"<script[\s\S]*?</script>|<style[\s\S]*?</style>", "", text, flags=re.I |
|
|
) |
|
|
body = re.sub("<[^<]+?>", " ", body) |
|
|
body = re.sub("\s+", " ", body).strip() |
|
|
links = [ |
|
|
{"href": html.unescape(h)} for h in re.findall(r'href="([^"]+)"', text) |
|
|
][:200] |
|
|
return {"title": title, "text": body[:20000], "links": links} |
|
|
|