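"""Crawl MDN pages from a sitemap and convert them to Markdown JSONL records.

Two typer sub-commands:
  - ``sitemap``: download every <loc> URL in a sitemap XML into a local tree.
  - ``clean``: parse the downloaded HTML and emit one {"title", "text"} JSON
    object per line.
"""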
import asyncio
import pathlib
import re
import urllib.parse

import httpx
import markdownify
import orjson
import tqdm
import typer
from bs4 import BeautifulSoup, Tag
from markdownify import chomp

app = typer.Typer()


class MDNConverter(markdownify.MarkdownConverter):
    """MarkdownConverter tweaked for MDN's article markup."""

    def convert_a(self, el, text, convert_as_inline):
        # Drop hyperlinks entirely, keeping only the anchor text.
        prefix, suffix, text = chomp(text)
        if not text:
            return ""
        return "%s%s%s" % (prefix, text, suffix)

    def convert_pre(self, el: Tag, text, convert_as_inline):
        if not text:
            return ""
        code_language = self.options["code_language"]
        # MDN precedes code blocks with <div class="example-header"><span>lang</span></div>;
        # take the language label from that span, then drop the header div.
        prev = el.previous_sibling
        if prev is not None and prev.name == "div" and "example-header" in prev.get("class", []):
            span = prev.find("span")
            if span is not None:
                code_language = span.get_text()
            prev.decompose()
        return "\n```%s\n%s\n```\n" % (code_language, text)


inst = MDNConverter()
# Collapse a language label that leaked directly in front of its own fence,
# e.g. "js\n```js" -> "\n```js".
rgx = re.compile(r"(.+)\n```\1")


def quote_fx(matchobj):
    return f"\n```{matchobj.group(1)}"


def soup_processor(soup: BeautifulSoup):
    base_article = soup.select_one("main#content > article")
    if base_article is None:
        # Page has no article body (e.g. redirects); signal the caller to skip it.
        return None
    for script in base_article.find_all("script"):
        script.decompose()
    # Strip the "last modified" footer and the hydration state blob.
    mod_time = base_article.find("aside", attrs={"class": "metadata"})
    if mod_time:
        mod_time.decompose()
    hydro = base_article.select_one("#hydration")
    if hydro:
        hydro.decompose()
    r = inst.convert_soup(base_article).replace("\n\n\n\n", "\n\n").replace("\n\n\n", "\n\n")
    r = (
        rgx.sub(quote_fx, r)
        .replace("\n \n", "\n\n")
        .replace("\n\n```\n", "\n```\n\n")
        .rstrip()
        .replace("\t", " ")
    )
    return {
        "title": soup.find("title").get_text().split("|")[0].strip(),
        "text": r,
    }


@app.command()
def clean(root: pathlib.Path, output_jsonl: pathlib.Path):
    """Convert downloaded MDN HTML under `root` into a JSONL file."""
    with open(output_jsonl.resolve(), "wb") as f:
        for file in root.rglob("*.html"):
            # Skip the meta "docs/MDN/" pages that document MDN itself.
            if file.is_file() and "docs/MDN/" not in str(file):
                print(file)
                soup = BeautifulSoup(file.read_text(encoding="utf-8"), "lxml")
                r = soup_processor(soup)
                if r is not None:  # pages without an <article> yield None
                    f.write(orjson.dumps(r) + b"\n")
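# Each output line is a compact JSON object shaped like (values illustrative):
#   {"title": "Array.prototype.map() - JavaScript", "text": "# Array.prototype.map()\n\n..."}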


async def async_executor(urls: list[str], root: pathlib.Path):
    # Cap the crawl at 64 in-flight requests.
    sem = asyncio.Semaphore(64)
    client = httpx.AsyncClient(timeout=None)
    client.headers["user-agent"] = (
        "curl/7.54.1 (compatible; kinda like curl) "
        + client.headers["user-agent"]
        + " mdncrawl/1.0.0 (SiteMap Crawling; +shinon@recursal.ai; Contact if bot errors.)"
    )
    pbar = tqdm.tqdm(total=len(urls))

    async def url_executor(url: str):
        # Mirror the URL path into the local tree, appending ".html" if missing.
        pth = urllib.parse.urlparse(url).path[1:]
        url_write = (root / pth).resolve()
        url_write.parent.mkdir(exist_ok=True, parents=True)
        async with sem:
            response = await client.get(url)
            if response.status_code == 200:
                if url_write.suffix != ".html":
                    url_write = url_write.with_suffix(f"{url_write.suffix}.html")
                url_write.write_bytes(response.content)
                pbar.update(1)
            else:
                print(response.status_code)

    loop = asyncio.get_running_loop()
    workers = [loop.create_task(url_executor(url)) for url in urls]
    await asyncio.gather(*workers)
    await client.aclose()


@app.command()
def sitemap(xml_file: pathlib.Path, root: pathlib.Path):
    """Download every <loc> URL in `xml_file` into the `root` directory."""
    root = root.resolve()
    soup = BeautifulSoup(xml_file.read_text(encoding="utf-8"), "xml")
    urls = [i.get_text() for i in soup.find_all("loc")]
    asyncio.run(async_executor(urls, root))


if __name__ == "__main__":
    app()
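# Usage sketch (file and directory names are illustrative, not part of the script):
#   python MDNClean.py sitemap sitemap.xml ./mdn_html
#   python MDNClean.py clean ./mdn_html mdn.jsonl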