Upload scripts/MDNClean.py with huggingface_hub
scripts/MDNClean.py (ADDED, +111 -0)
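The script provides two Typer commands: sitemap, which downloads every <loc> URL listed in a sitemap XML into a local HTML tree, and clean, which converts that tree into a JSONL file of Markdown records, one {"title", "text"} object per page.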
import asyncio
import pathlib
import re
import urllib.parse

import httpx
import markdownify
import orjson
import tqdm
import typer
from bs4 import BeautifulSoup, Tag
from markdownify import chomp

app = typer.Typer()


class MDNConverter(markdownify.MarkdownConverter):
    """markdownify converter with MDN-specific tweaks."""

    def convert_a(self, el, text, convert_as_inline):
        # Drop hyperlinks entirely, keeping only the anchor text.
        prefix, suffix, text = chomp(text)
        if not text:
            return ""
        return "%s%s%s" % (prefix, text, suffix)

    def convert_pre(self, el: Tag, text, convert_as_inline):
        if not text:
            return ""

        # MDN precedes each code sample with a <div class="example-header">
        # whose <span> names the language; use that as the fence info string
        # and remove the header so it does not leak into the Markdown.
        code_language = self.options["code_language"]
        header = el.previous_sibling
        if header and header.name == "div" and "example-header" in header.get("class", []):
            span = header.find("span")
            if span:
                code_language = span.get_text()
            header.decompose()

        return "\n```%s\n%s\n```\n" % (code_language, text)


inst = MDNConverter()

# The language label can still end up as bare text directly before the fence
# ("js\n```js"); collapse the duplicate into the fence info string.
rgx = re.compile(r"(.+)\n```\1")


def quote_fx(matchobj):
    return f"\n```{matchobj.group(1)}"


def soup_processor(soup: BeautifulSoup):
    base_article = soup.select_one("main#content > article")
    if not base_article:
        return None
    # Strip scripts and page chrome before conversion.
    for script in base_article.find_all("script"):
        script.decompose()
    mod_time = base_article.find("aside", attrs={"class": "metadata"})
    if mod_time:
        mod_time.decompose()
    hydro = base_article.select_one("#hydration")
    if hydro:
        hydro.decompose()

    # Convert to Markdown, then squeeze extra blank lines and fix fence spacing.
    r = inst.convert_soup(base_article).replace("\n\n\n\n", "\n\n").replace("\n\n\n", "\n\n")
    r = rgx.sub(quote_fx, r).replace("\n \n", "\n\n").replace("\n\n```\n", "\n```\n\n").rstrip().replace("\t", " ")

    return {"title": soup.find("title").get_text().split("|")[0].strip(), "text": r}


@app.command()
def clean(root: pathlib.Path, output_jsonl: pathlib.Path):
    with open(output_jsonl.resolve(), "wb") as f:
        for file in root.rglob("*.html"):
            # docs/MDN/ holds MDN's own meta-documentation, not reference content.
            if file.is_file() and "docs/MDN/" not in str(file):
                soup = BeautifulSoup(file.read_text(encoding="utf-8"), "lxml")
                print(file)
                r = soup_processor(soup)
                if r:  # pages without a main article yield None
                    f.write(orjson.dumps(r) + b"\n")


async def async_executor(urls: list[str], root: pathlib.Path):
    sem = asyncio.Semaphore(64)  # cap concurrent requests
    client = httpx.AsyncClient(timeout=None)
    client.headers["user-agent"] = (
        "curl/7.54.1 (compatible; kinda like curl) "
        + client.headers["user-agent"]
        + " mdncrawl/1.0.0 (SiteMap Crawling; +shinon@recursal.ai; Contact if bot errors.)"
    )
    pbar = tqdm.tqdm(total=len(urls))

    async def url_executor(url: str):
        # Mirror the URL path under the local root, adding .html where missing.
        pth = urllib.parse.urlparse(url).path[1:]
        url_write = (root / pth).resolve()
        url_write.parent.mkdir(exist_ok=True, parents=True)
        async with sem:
            response = await client.get(url)
            if response.status_code == 200:
                if url_write.suffix != ".html":
                    url_write = url_write.with_suffix(f"{url_write.suffix}.html")
                url_write.write_bytes(response.content)
                pbar.update(1)
            else:
                print(response.status_code, url)

    workers = [asyncio.create_task(url_executor(url)) for url in urls]
    await asyncio.gather(*workers)
    await client.aclose()


@app.command()
def sitemap(xml_file: pathlib.Path, root: pathlib.Path):
    # Crawl every <loc> URL listed in the sitemap into the local root.
    root = root.resolve()
    soup = BeautifulSoup(xml_file.read_text(encoding="utf-8"), "xml")
    urls = [i.get_text() for i in soup.find_all("loc")]
    asyncio.run(async_executor(urls, root))


if __name__ == "__main__":
    app()
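A minimal usage sketch (the sitemap.xml path and output names here are illustrative; MDN's actual sitemap must be fetched and decompressed separately before the crawl step):

python scripts/MDNClean.py sitemap sitemap.xml ./crawl
python scripts/MDNClean.py clean ./crawl mdn.jsonl

Typer derives the two subcommands and their positional arguments directly from the decorated functions, so the argument order matches each function's signature.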