|
|
|
|
|
""" |
|
|
Vietnamese Legal Corpus Downloader |
|
|
Downloads laws and codes from thuvienphapluat.vn |
|
|
""" |
|
|
|
|
|
import re |
|
|
import time |
|
|
import sqlite3 |
|
|
from pathlib import Path |
|
|
from datetime import datetime |
|
|
from urllib.parse import urljoin |
|
|
|
|
|
import httpx |
|
|
import click |
|
|
from bs4 import BeautifulSoup |
|
|
from rich.console import Console |
|
|
from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn |
|
|
|
|
|
# Shared Rich console used for all CLI output in this module.
console = Console()

# Site being scraped and its document-search endpoint.
BASE_URL = "https://thuvienphapluat.vn"
SEARCH_URL = "https://thuvienphapluat.vn/page/tim-van-ban.aspx"
# Output directory for downloaded markdown files (../data relative to this file).
DATA_DIR = Path(__file__).parent.parent / "data"
# SQLite database holding the list of laws/codes to download (tables: codes, laws).
DB_PATH = Path(__file__).parent.parent / "vietnam_laws.sqlite"

# Browser-like request headers sent on every HTTP call.
# NOTE(review): presumably needed so the site serves normal HTML — confirm if changed.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "vi-VN,vi;q=0.9,en;q=0.8",
    "Referer": "https://thuvienphapluat.vn/",
}
|
|
|
|
|
|
|
|
def slugify(text: str) -> str:
    """Convert Vietnamese text to an ASCII slug.

    Lowercases the input, strips Vietnamese diacritics, collapses every run
    of non-alphanumeric characters into a single hyphen, and trims leading
    and trailing hyphens.

    Args:
        text: Arbitrary (possibly accented Vietnamese) text.

    Returns:
        A lowercase ASCII slug such as "bo-luat-dan-su"; may be "" for
        input with no alphanumeric characters.
    """
    vietnamese_map = {
        'à': 'a', 'á': 'a', 'ả': 'a', 'ã': 'a', 'ạ': 'a',
        'ă': 'a', 'ằ': 'a', 'ắ': 'a', 'ẳ': 'a', 'ẵ': 'a', 'ặ': 'a',
        'â': 'a', 'ầ': 'a', 'ấ': 'a', 'ẩ': 'a', 'ẫ': 'a', 'ậ': 'a',
        'đ': 'd',
        'è': 'e', 'é': 'e', 'ẻ': 'e', 'ẽ': 'e', 'ẹ': 'e',
        'ê': 'e', 'ề': 'e', 'ế': 'e', 'ể': 'e', 'ễ': 'e', 'ệ': 'e',
        'ì': 'i', 'í': 'i', 'ỉ': 'i', 'ĩ': 'i', 'ị': 'i',
        'ò': 'o', 'ó': 'o', 'ỏ': 'o', 'õ': 'o', 'ọ': 'o',
        'ô': 'o', 'ồ': 'o', 'ố': 'o', 'ổ': 'o', 'ỗ': 'o', 'ộ': 'o',
        'ơ': 'o', 'ờ': 'o', 'ớ': 'o', 'ở': 'o', 'ỡ': 'o', 'ợ': 'o',
        'ù': 'u', 'ú': 'u', 'ủ': 'u', 'ũ': 'u', 'ụ': 'u',
        'ư': 'u', 'ừ': 'u', 'ứ': 'u', 'ử': 'u', 'ữ': 'u', 'ự': 'u',
        'ỳ': 'y', 'ý': 'y', 'ỷ': 'y', 'ỹ': 'y', 'ỵ': 'y',
    }

    # Single C-level pass via str.translate instead of ~67 chained
    # str.replace calls (same result, far fewer string copies).
    text = text.lower().translate(str.maketrans(vietnamese_map))

    # Collapse every run of non-slug characters to one hyphen, then trim.
    text = re.sub(r'[^a-z0-9]+', '-', text)
    return text.strip('-')
|
|
|
|
|
|
|
|
def create_front_matter(metadata: dict) -> str:
    """Create YAML front matter for a markdown file.

    Keys with a value of None are omitted. String values containing a
    newline, colon, or double quote are emitted as double-quoted YAML
    scalars with proper escaping; everything else is written verbatim.

    Args:
        metadata: Flat mapping of front-matter keys to scalar values.

    Returns:
        The front-matter block, including the opening and closing "---"
        lines, without a trailing newline.
    """
    lines = ["---"]
    for key, value in metadata.items():
        if value is not None:
            if isinstance(value, str) and ('\n' in value or ':' in value or '"' in value):
                # Double-quoted YAML scalars must escape backslashes and
                # quotes, and cannot contain literal newlines — the original
                # code escaped only quotes, emitting invalid YAML for
                # multi-line or backslash-containing values.
                value = value.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')
                lines.append(f'{key}: "{value}"')
            else:
                lines.append(f"{key}: {value}")
    lines.append("---")
    return "\n".join(lines)
|
|
|
|
|
|
|
|
def search_law_url(client: httpx.Client, name_vi: str, doc_number: str | None = None) -> str | None:
    """Search thuvienphapluat.vn for a document and return its absolute URL.

    Searches by document number when available (more precise), otherwise by
    the Vietnamese name. Returns None when nothing is found or on any error.
    """
    # Prefer the document number as the search keyword when we have one.
    query = doc_number if doc_number else name_vi
    search_params = {
        "keyword": query,
        "type": "0",
        "match": "True",
        "area": "0",
        "status": "0",
        "signer": "0",
        "sort": "0",
        "page": "1",
    }

    try:
        resp = client.get(SEARCH_URL, params=search_params, follow_redirects=True)
        resp.raise_for_status()

        page = BeautifulSoup(resp.text, "lxml")

        # First pass: look inside recognizable result containers.
        for result in page.select(".nqDoc, .doc-item, .search-result-item, [class*='item']"):
            anchor = result.select_one("a[href*='/van-ban/']")
            if anchor:
                target = anchor.get("href", "")
                if target:
                    return urljoin(BASE_URL, target)

        # Second pass: fall back to any document link on the page.
        for anchor in page.select("a[href*='/van-ban/']"):
            target = anchor.get("href", "")
            if target and "/van-ban/" in target:
                return urljoin(BASE_URL, target)

        return None

    except Exception as e:
        # Best-effort lookup: report and move on rather than aborting the run.
        console.print(f"[yellow]Search failed for {name_vi}: {e}[/yellow]")
        return None
|
|
|
|
|
|
|
|
def fetch_law_content(client: httpx.Client, url: str) -> dict | None:
    """Fetch a law's page from thuvienphapluat.vn and extract its fields.

    Returns a dict with at least "url" and "downloaded_at", plus any of
    title/document_number/effective_date/status/issuing_body/content that
    could be located in the page. Returns None on any error.
    """
    try:
        resp = client.get(url, follow_redirects=True)
        resp.raise_for_status()

        page = BeautifulSoup(resp.text, "lxml")

        record = {
            "url": url,
            "downloaded_at": datetime.now().isoformat(),
        }

        # Metadata fields and the CSS selectors (English and Vietnamese
        # class names) that may carry them.
        field_selectors = (
            ("title", "h1.title, .doc-title h1, h1"),
            ("document_number", ".doc-number, .so-hieu"),
            ("effective_date", ".effective-date, .ngay-hieu-luc"),
            ("status", ".doc-status, .tinh-trang"),
            ("issuing_body", ".issuing-body, .co-quan-ban-hanh"),
        )
        for field, selector in field_selectors:
            node = page.select_one(selector)
            if node:
                record[field] = node.get_text(strip=True)

        # Main document body: try the dedicated content containers first.
        body_node = page.select_one(".doc-content, .noi-dung, .content, article")
        if body_node:
            for junk in body_node.select("script, style"):
                junk.decompose()
            record["content"] = body_node.get_text(separator="\n", strip=True)
        else:
            # Fall back to the whole <body>, minus chrome elements.
            fallback = page.find("body")
            if fallback:
                for junk in fallback.select("script, style, nav, header, footer"):
                    junk.decompose()
                record["content"] = fallback.get_text(separator="\n", strip=True)

        return record

    except Exception as e:
        console.print(f"[red]Error fetching {url}: {e}[/red]")
        return None
|
|
|
|
|
|
|
|
def fetch_law_list_page(client: httpx.Client, page: int = 1) -> list[dict]:
    """Fetch one page of the site's document listing.

    Returns a list of dicts with "url", "title", and (when present)
    "document_number"; entries without a URL are dropped. Returns an
    empty list on any error.
    """
    listing_url = f"{BASE_URL}/page/tim-van-ban.aspx?keyword=&type=0&match=True&area=0&status=0&signer=0&sort=0&page={page}"

    try:
        resp = client.get(listing_url, follow_redirects=True)
        resp.raise_for_status()

        markup = BeautifulSoup(resp.text, "lxml")
        results = []

        for entry in markup.select(".doc-item, .search-result-item, .item"):
            record = {}

            anchor = entry.select_one("a[href*='/van-ban/']")
            if anchor:
                record["url"] = urljoin(BASE_URL, anchor.get("href", ""))
                record["title"] = anchor.get_text(strip=True)

            number_node = entry.select_one(".doc-number, .so-hieu")
            if number_node:
                record["document_number"] = number_node.get_text(strip=True)

            # Only keep entries we can actually navigate to later.
            if record.get("url"):
                results.append(record)

        return results

    except Exception as e:
        console.print(f"[red]Error fetching law list page {page}: {e}[/red]")
        return []
|
|
|
|
|
|
|
|
def get_laws_from_db() -> list[dict]:
    """Load all codes and laws from the SQLite database.

    Reads the `codes` and `laws` tables (same column layout) and normalizes
    each row into a dict with a synthetic "id" ("code-<id>" / "law-<id>")
    and a "type" discriminator.

    Returns:
        A list of law/code dicts; empty if the database file is missing.
    """
    if not DB_PATH.exists():
        console.print(f"[yellow]Database not found: {DB_PATH}[/yellow]")
        return []

    laws: list[dict] = []
    conn = sqlite3.connect(DB_PATH)
    try:
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()

        # Both tables share the same columns; only the table name and the
        # type tag differ, so one loop covers both (the original duplicated
        # this block per table).
        for table, doc_type in (("codes", "code"), ("laws", "law")):
            cursor.execute(f"SELECT * FROM {table}")  # table names are hard-coded, not user input
            for row in cursor.fetchall():
                laws.append({
                    "id": f"{doc_type}-{row['id']}",
                    "type": doc_type,
                    "name": row["name"],
                    "name_vi": row["name_vi"],
                    "year": row["year"],
                    "document_number": row["document_number"],
                    "effective_date": row["effective_date"],
                    "status": row["status"],
                    "url": row["url"],
                })
    finally:
        # The original leaked the connection when a query raised; always close.
        conn.close()

    return laws
|
|
|
|
|
|
|
|
def save_law_file(law: dict, content: str | None = None) -> Path:
    """Write one law/code record to a markdown file with YAML front matter.

    The filename is "<code|law>-<year>-<slug>.md" under DATA_DIR. When
    `content` is None a placeholder note is written instead of the body.

    Returns:
        Path of the file that was written.
    """
    DATA_DIR.mkdir(parents=True, exist_ok=True)

    doc_type = law.get("type", "law")
    doc_year = law.get("year", "unknown")
    vietnamese_name = law.get("name_vi", law.get("name", "unknown"))

    stem = "code" if doc_type == "code" else "law"
    target = DATA_DIR / f"{stem}-{doc_year}-{slugify(vietnamese_name)}.md"

    front_matter = create_front_matter({
        "title": law.get("name_vi", law.get("name")),
        "title_en": law.get("name"),
        "type": doc_type,
        "year": doc_year,
        "document_number": law.get("document_number"),
        "effective_date": law.get("effective_date"),
        "status": law.get("status", "Active"),
        "url": law.get("url"),
        "downloaded_at": datetime.now().isoformat(),
    })

    # Assemble the document from segments and join once (avoids repeated
    # string concatenation); output is byte-identical to the += version.
    segments = [
        front_matter,
        "\n\n",
        f"# {law.get('name_vi', law.get('name'))}\n\n",
    ]
    if law.get("name"):
        segments.append(f"**English:** {law['name']}\n\n")
    if law.get("document_number"):
        segments.append(f"**Số hiệu:** {law['document_number']}\n\n")
    if law.get("effective_date"):
        segments.append(f"**Ngày hiệu lực:** {law['effective_date']}\n\n")
    if content:
        segments.append("## Nội dung\n\n")
        segments.append(content)
    else:
        segments.append("*Nội dung chưa được tải xuống.*\n")

    target.write_text("".join(segments), encoding="utf-8")
    return target
|
|
|
|
|
|
|
|
@click.group()
def cli():
    """Vietnamese Legal Corpus Downloader"""
    # Click command group; subcommands (download, stats, list-files) are
    # registered below via @cli.command(). The docstring doubles as the
    # CLI help text, so it is left untouched.
    pass
|
|
|
|
|
|
|
|
@cli.command()
@click.option("--fetch-content", is_flag=True, help="Fetch full content from web")
@click.option("--delay", default=1.0, help="Delay between requests (seconds)")
@click.option("--limit", default=0, help="Limit number of laws to download (0 = all)")
def download(fetch_content: bool, delay: float, limit: int):
    """Download laws from database to markdown files.

    Reads records from the SQLite database and writes one markdown file per
    law/code. With --fetch-content, also scrapes the full text from the
    website (searching for the URL first when the record has none), pausing
    `delay` seconds between requests.
    """
    console.print("[bold blue]Vietnamese Legal Corpus Downloader[/bold blue]")
    console.print()

    laws = get_laws_from_db()

    if not laws:
        console.print("[yellow]No laws found in database. Please populate the database first.[/yellow]")
        return

    if limit > 0:
        laws = laws[:limit]

    console.print(f"Found [green]{len(laws)}[/green] laws/codes to download")
    console.print(f"Output directory: [cyan]{DATA_DIR}[/cyan]")
    console.print()

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        BarColumn(),
        TaskProgressColumn(),
        console=console,
    ) as progress:
        task = progress.add_task("Downloading...", total=len(laws))

        # Context manager replaces the original try/finally and guarantees
        # the HTTP client is closed even if a download raises.
        with httpx.Client(headers=HEADERS, timeout=30.0) as client:
            for law in laws:
                name = law.get("name_vi", law.get("name", "Unknown"))
                progress.update(task, description=f"[cyan]{name[:40]}[/cyan]")

                content = None
                url = law.get("url")

                if fetch_content:
                    if not url:
                        # No stored URL: search the site by name / document number.
                        url = search_law_url(
                            client,
                            law.get("name_vi", ""),
                            law.get("document_number")
                        )
                        time.sleep(delay)  # politeness delay between requests

                    if url:
                        law["url"] = url
                        data = fetch_law_content(client, url)
                        if data:
                            content = data.get("content")
                        time.sleep(delay)

                # Original bound the returned path to an unused local; the
                # return value is not needed here.
                save_law_file(law, content)
                progress.advance(task)

    console.print()
    console.print(f"[green]Done![/green] Files saved to [cyan]{DATA_DIR}[/cyan]")
|
|
|
|
|
|
|
|
@cli.command()
def stats():
    """Show statistics from database."""
    laws = get_laws_from_db()

    if not laws:
        console.print("[yellow]No data in database[/yellow]")
        return

    # Count by document type without materializing intermediate lists.
    code_total = sum(1 for entry in laws if entry["type"] == "code")
    law_total = sum(1 for entry in laws if entry["type"] == "law")

    console.print("[bold]Vietnamese Legal Corpus Statistics[/bold]")
    console.print()
    console.print(f" Codes (Bộ luật): [green]{code_total}[/green]")
    console.print(f" Laws (Luật): [green]{law_total}[/green]")
    console.print(f" [bold]Total: [green]{len(laws)}[/green][/bold]")

    console.print()
    console.print("[bold]By Year:[/bold]")
    year_counts = {}
    for entry in laws:
        bucket = entry.get("year", "Unknown")
        year_counts[bucket] = year_counts.get(bucket, 0) + 1

    # Show at most the ten most recent years.
    for bucket in sorted(year_counts.keys(), reverse=True)[:10]:
        console.print(f" {bucket}: {year_counts[bucket]}")
|
|
|
|
|
|
|
|
@cli.command()
def list_files():
    """List downloaded files."""
    if not DATA_DIR.exists():
        console.print("[yellow]No data directory found[/yellow]")
        return

    markdown_files = list(DATA_DIR.rglob("*.md"))
    total = len(markdown_files)
    console.print(f"Found [green]{total}[/green] files in {DATA_DIR}")

    # Print the first 20 paths; summarize the rest.
    for path in markdown_files[:20]:
        console.print(f" {path.relative_to(DATA_DIR)}")

    if total > 20:
        console.print(f" ... and {total - 20} more")
|
|
|
|
|
|
|
|
def main():
    """Console entry point; delegates to the click command group."""
    cli()
|
|
|
|
|
|
|
|
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
|
|