"""Crawl MDN via its sitemap and convert the saved HTML pages to Markdown JSONL."""
import asyncio
import pathlib
import re
import markdownify
from markdownify import chomp
import urllib.parse
import orjson
import tqdm
import typer
import httpx
from bs4 import BeautifulSoup, Tag
# Typer CLI application; subcommands are registered below via @app.command().
app = typer.Typer()
class MDNConverter(markdownify.MarkdownConverter):
    """Markdown converter tuned for MDN article HTML.

    - Links are flattened to their text only (MDN's relative links are not
      useful in a standalone text dump).
    - A ``<pre>`` immediately preceded by an MDN ``example-header`` div takes
      its fence language from that header's ``<span>`` label, and the header
      div is removed so it is not emitted as stray text.
    """

    def convert_a(self, el, text, convert_as_inline):
        # Drop the hyperlink entirely, keeping only the (chomped) link text.
        prefix, suffix, text = chomp(text)
        if not text:
            return ""
        return "%s%s%s" % (prefix, text, suffix)

    def convert_pre(self, el: Tag, text, convert_as_inline):
        if not text:
            return ''
        code_language = self.options['code_language']
        prev = el.previous_sibling
        # NavigableString siblings have name == None, so this only matches tags.
        if prev is not None and prev.name == "div":
            if "example-header" in prev.get("class", []):
                # Bug fix: the header may lack a <span>; the original crashed
                # with AttributeError on .get_text() in that case.
                span = prev.find('span')
                if span is not None:
                    code_language = span.get_text()
                prev.decompose()
        return '\n```%s\n%s\n```\n' % (code_language, text)
# Shared converter instance used by soup_processor.
inst = MDNConverter()
# Matches a code-fence whose language label is duplicated as plain text on
# the line immediately before the fence (an artefact of the conversion);
# quote_fx collapses the duplicate.
rgx = re.compile(r"(.+)\n```\1")
def quote_fx(matchobj):
    """Replace a duplicated-language match with a bare fence opener."""
    language = matchobj.group(1)
    return "\n```" + language
def soup_processor(soup: BeautifulSoup):
    """Extract the main MDN article from *soup* and convert it to Markdown.

    Returns ``{"title": ..., "text": ...}``, or ``None`` when the page has no
    ``main#content > article`` element.
    """
    base_article = soup.select_one("main#content > article")
    # Bug fix: the None check must run BEFORE dereferencing base_article;
    # the original called find_all() first and crashed on article-less pages.
    if not base_article:
        return None
    for script in base_article.find_all("script"):
        script.decompose()
    # Strip MDN chrome that should not appear in the text dump.
    mod_time = base_article.find("aside", attrs={"class": "metadata"})
    if mod_time:
        mod_time.decompose()
    hydro = base_article.select_one("#hydration")
    if hydro:
        hydro.decompose()
    # Convert to Markdown, then normalise excess blank lines.
    r = inst.convert_soup(base_article).replace("\n\n\n\n", "\n\n").replace("\n\n\n", "\n\n")
    # Collapse duplicated fence-language lines and tidy fence spacing.
    r = (
        rgx.sub(quote_fx, r)
        .replace("\n \n", "\n\n")
        .replace("\n\n```\n", "\n```\n\n")
        .rstrip()
        .replace("\t", " ")
    )
    # Title is everything before MDN's " | MDN" suffix.
    data = {"title": soup.find("title").get_text().split("|")[0].strip(), "text": r}
    return data
@app.command()
def clean(root: pathlib.Path, output_jsonl: pathlib.Path):
    """Convert every crawled HTML file under *root* into one JSONL file.

    Skips MDN meta pages (paths containing ``docs/MDN/``) and pages with no
    main article.
    """
    with open(output_jsonl.resolve(), "wb") as f:
        for file in root.rglob("*.html"):
            if file.is_file() and "docs/MDN/" not in str(file):
                soup = BeautifulSoup(file.read_text(encoding="utf-8"), "lxml")
                print(file)
                r = soup_processor(soup)
                # Bug fix: soup_processor returns None for article-less pages;
                # the original serialized that into bare "null" JSONL lines.
                if r is not None:
                    f.write(orjson.dumps(r) + b"\n")
async def async_executor(urls: list[str], root: pathlib.Path):
    """Fetch *urls* concurrently (at most 64 in flight) and mirror them under *root*.

    Each URL path becomes a local file path; saved files get a ``.html``
    suffix. Non-200 responses are logged by status code and skipped.
    """
    sem = asyncio.Semaphore(64)
    client = httpx.AsyncClient(timeout=None)
    client.headers["user-agent"] = "curl/7.54.1 (compatible; kinda like curl) " + client.headers["user-agent"] + " mdncrawl/1.0.0 (SiteMap Crawling; +shinon@recursal.ai; Contact if bot errors.)"
    pbar = tqdm.tqdm()

    async def url_executor(url: str):
        # Map the URL path (minus the leading "/") onto the local mirror tree.
        pth = urllib.parse.urlparse(url).path[1:]
        url_write = (root / pth).resolve()
        url_write.parent.mkdir(exist_ok=True, parents=True)
        async with sem:
            response = await client.get(url)
            if response.status_code == 200:
                if url_write.suffix != ".html":
                    url_write = url_write.with_suffix(f"{url_write.suffix}.html")
                url_write.write_bytes(response.content)
                pbar.update(1)
            else:
                print(response.status_code)

    loop = asyncio.get_running_loop()
    workers = [loop.create_task(url_executor(url)) for url in urls]
    try:
        await asyncio.gather(*workers)
    finally:
        # Bug fix: the original leaked the HTTP connection pool and the
        # progress bar; close both even if a worker raises.
        await client.aclose()
        pbar.close()
@app.command()
def sitemap(xml_file: pathlib.Path, root: pathlib.Path):
    """Crawl every <loc> URL listed in a sitemap XML file into *root*."""
    root = root.resolve()
    parsed = BeautifulSoup(xml_file.read_text(encoding="utf-8"), "xml")
    locations = [loc.get_text() for loc in parsed.find_all("loc")]
    asyncio.run(async_executor(locations, root))
if __name__ == "__main__":
    # Script entry point: dispatch to the Typer CLI. (The stray trailing "|"
    # was page-extraction residue and made the file a syntax error.)
    app()