# Source: UTS_VLC/scripts/browser_downloader.py
# Author: Vu Anh
# Commit: "Add dataset card and scripts" (e7cee37)
#!/usr/bin/env python3
"""
Vietnamese Legal Corpus Browser Downloader
Uses Playwright to download laws from thuvienphapluat.vn
"""
import asyncio
import re
import sqlite3
import unicodedata
from datetime import datetime
from pathlib import Path

import click
from rich.console import Console
from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn
console = Console()
DATA_DIR = Path(__file__).parent.parent / "data"
DB_PATH = Path(__file__).parent.parent / "vietnam_laws.sqlite"
# Known URL patterns for major codes
KNOWN_URLS = {
"91/2015/QH13": "https://thuvienphapluat.vn/van-ban/Quyen-dan-su/Bo-luat-dan-su-2015-296215.aspx",
"92/2015/QH13": "https://thuvienphapluat.vn/van-ban/Thu-tuc-To-tung/Bo-luat-to-tung-dan-su-2015-296861.aspx",
"100/2015/QH13": "https://thuvienphapluat.vn/van-ban/Trach-nhiem-hinh-su/Bo-luat-hinh-su-2015-296661.aspx",
"101/2015/QH13": "https://thuvienphapluat.vn/van-ban/Trach-nhiem-hinh-su/Bo-luat-to-tung-hinh-su-2015-296884.aspx",
"95/2015/QH13": "https://thuvienphapluat.vn/van-ban/Giao-thong-Van-tai/Bo-luat-hang-hai-Viet-Nam-2015-298822.aspx",
"45/2019/QH14": "https://thuvienphapluat.vn/van-ban/Lao-dong-Tien-luong/Bo-luat-Lao-dong-2019-428584.aspx",
}
def slugify(text: str) -> str:
"""Convert Vietnamese text to slug."""
vietnamese_map = {
'à': 'a', 'á': 'a', 'ả': 'a', 'ã': 'a', 'ạ': 'a',
'ă': 'a', 'ằ': 'a', 'ắ': 'a', 'ẳ': 'a', 'ẵ': 'a', 'ặ': 'a',
'â': 'a', 'ầ': 'a', 'ấ': 'a', 'ẩ': 'a', 'ẫ': 'a', 'ậ': 'a',
'đ': 'd',
'è': 'e', 'é': 'e', 'ẻ': 'e', 'ẽ': 'e', 'ẹ': 'e',
'ê': 'e', 'ề': 'e', 'ế': 'e', 'ể': 'e', 'ễ': 'e', 'ệ': 'e',
'ì': 'i', 'í': 'i', 'ỉ': 'i', 'ĩ': 'i', 'ị': 'i',
'ò': 'o', 'ó': 'o', 'ỏ': 'o', 'õ': 'o', 'ọ': 'o',
'ô': 'o', 'ồ': 'o', 'ố': 'o', 'ổ': 'o', 'ỗ': 'o', 'ộ': 'o',
'ơ': 'o', 'ờ': 'o', 'ớ': 'o', 'ở': 'o', 'ỡ': 'o', 'ợ': 'o',
'ù': 'u', 'ú': 'u', 'ủ': 'u', 'ũ': 'u', 'ụ': 'u',
'ư': 'u', 'ừ': 'u', 'ứ': 'u', 'ử': 'u', 'ữ': 'u', 'ự': 'u',
'ỳ': 'y', 'ý': 'y', 'ỷ': 'y', 'ỹ': 'y', 'ỵ': 'y',
}
text = text.lower()
for viet, ascii_char in vietnamese_map.items():
text = text.replace(viet, ascii_char)
text = re.sub(r'[^a-z0-9]+', '-', text)
text = re.sub(r'-+', '-', text)
return text.strip('-')
def create_front_matter(metadata: dict) -> str:
"""Create YAML front matter."""
lines = ["---"]
for key, value in metadata.items():
if value is not None:
if isinstance(value, str) and ('\n' in value or ':' in value or '"' in value):
value = value.replace('"', '\\"').replace('\n', ' ')
lines.append(f'{key}: "{value}"')
else:
lines.append(f"{key}: {value}")
lines.append("---")
return "\n".join(lines)
def get_laws_from_db() -> list[dict]:
"""Get laws from SQLite database."""
if not DB_PATH.exists():
return []
conn = sqlite3.connect(DB_PATH)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
laws = []
cursor.execute("SELECT * FROM codes")
for row in cursor.fetchall():
laws.append({
"id": f"code-{row['id']}",
"type": "code",
"name": row["name"],
"name_vi": row["name_vi"],
"year": row["year"],
"document_number": row["document_number"],
"effective_date": row["effective_date"],
"status": row["status"],
})
cursor.execute("SELECT * FROM laws")
for row in cursor.fetchall():
laws.append({
"id": f"law-{row['id']}",
"type": "law",
"name": row["name"],
"name_vi": row["name_vi"],
"year": row["year"],
"document_number": row["document_number"],
"effective_date": row["effective_date"],
"status": row["status"],
})
conn.close()
return laws
def save_law_file(law: dict, content: str | None = None, url: str | None = None) -> Path:
"""Save law to markdown file."""
DATA_DIR.mkdir(parents=True, exist_ok=True)
law_type = law.get("type", "law")
year = law.get("year", "unknown")
name_vi = law.get("name_vi", law.get("name", "unknown"))
slug = slugify(name_vi)
prefix = "code" if law_type == "code" else "law"
filename = f"{prefix}-{year}-{slug}.md"
filepath = DATA_DIR / filename
front_matter_data = {
"title": law.get("name_vi", law.get("name")),
"title_en": law.get("name"),
"type": law_type,
"year": year,
"document_number": law.get("document_number"),
"effective_date": law.get("effective_date"),
"status": law.get("status", "Active"),
"url": url,
"downloaded_at": datetime.now().isoformat(),
}
md_content = create_front_matter(front_matter_data)
md_content += "\n\n"
md_content += f"# {law.get('name_vi', law.get('name'))}\n\n"
if law.get("name"):
md_content += f"**English:** {law['name']}\n\n"
if law.get("document_number"):
md_content += f"**Số hiệu:** {law['document_number']}\n\n"
if law.get("effective_date"):
md_content += f"**Ngày hiệu lực:** {law['effective_date']}\n\n"
if content:
md_content += "---\n\n"
md_content += content
else:
md_content += "*Nội dung chưa được tải xuống.*\n"
filepath.write_text(md_content, encoding="utf-8")
return filepath
async def download_with_playwright(laws: list[dict], delay: float = 2.0):
"""Download laws using Playwright."""
try:
from playwright.async_api import async_playwright
except ImportError:
console.print("[red]Playwright not installed. Run: uv pip install playwright && playwright install chromium[/red]")
return
async with async_playwright() as p:
browser = await p.chromium.launch(headless=True)
context = await browser.new_context(
user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
)
page = await context.new_page()
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
BarColumn(),
TaskProgressColumn(),
console=console,
) as progress:
task = progress.add_task("Downloading...", total=len(laws))
for law in laws:
name = law.get("name_vi", law.get("name", "Unknown"))
doc_num = law.get("document_number", "")
progress.update(task, description=f"[cyan]{name[:40]}[/cyan]")
content = None
url = KNOWN_URLS.get(doc_num)
if not url:
# Search on thuvienphapluat.vn
try:
search_url = f"https://thuvienphapluat.vn/page/tim-van-ban.aspx?keyword={doc_num or name}"
await page.goto(search_url, wait_until="networkidle", timeout=30000)
await asyncio.sleep(1)
# Find first result link
link = await page.query_selector("a[href*='/van-ban/']")
if link:
url = await link.get_attribute("href")
if url and not url.startswith("http"):
url = f"https://thuvienphapluat.vn{url}"
except Exception as e:
console.print(f"[yellow]Search failed: {name}: {e}[/yellow]")
if url:
try:
await page.goto(url, wait_until="networkidle", timeout=30000)
await asyncio.sleep(1)
# Extract content
content_elem = await page.query_selector(".content1, .toanvancontent, .fulltext")
if content_elem:
content = await content_elem.inner_text()
except Exception as e:
console.print(f"[yellow]Fetch failed: {name}: {e}[/yellow]")
save_law_file(law, content, url)
progress.advance(task)
await asyncio.sleep(delay)
await browser.close()
@click.command()
@click.option("--delay", default=2.0, help="Delay between requests (seconds)")
@click.option("--limit", default=0, help="Limit number of laws (0 = all)")
def download(delay: float, limit: int):
"""Download laws using browser automation."""
console.print("[bold blue]VLC Browser Downloader[/bold blue]\n")
laws = get_laws_from_db()
if not laws:
console.print("[yellow]No laws in database[/yellow]")
return
if limit > 0:
laws = laws[:limit]
console.print(f"Found [green]{len(laws)}[/green] laws to download")
console.print(f"Output: [cyan]{DATA_DIR}[/cyan]\n")
asyncio.run(download_with_playwright(laws, delay))
console.print(f"\n[green]Done![/green] Files saved to {DATA_DIR}")
if __name__ == "__main__":
download()