# UVN-1 / scripts/scrape_dantri.py
# Uploaded by rain1024 via huggingface_hub (commit da11d9b, verified)
"""
Scrape news articles from Dan Tri (https://dantri.com.vn)
"""
import json
import time
import random
import requests
from bs4 import BeautifulSoup
from pathlib import Path
from datetime import datetime
# Dan Tri category URL slugs mapped to their Vietnamese display names.
# The slug is the path segment used in listing URLs ({BASE_URL}/{slug}/trang-N.htm).
CATEGORIES = {
    "xa-hoi": "Xã hội",
    "the-gioi": "Thế giới",
    "kinh-doanh": "Kinh doanh",
    "giai-tri": "Giải trí",
    "the-thao": "Thể thao",
    "phap-luat": "Pháp luật",
    "giao-duc": "Giáo dục",
    "suc-khoe": "Sức khỏe",
    "doi-song": "Đời sống",
    "khoa-hoc-cong-nghe": "Khoa học",
}
# Root URL of the site being scraped.
BASE_URL = "https://dantri.com.vn"
# Output directory: <repo-root>/data/dantri, resolved relative to this script.
OUTPUT_DIR = Path(__file__).parent.parent / "data" / "dantri"
def get_article_urls(category_slug: str, max_pages: int = 50) -> list[dict]:
    """Collect article URLs from a Dan Tri category's paginated listing.

    Visits ``{BASE_URL}/{category_slug}/trang-{page}.htm`` for pages
    1..max_pages, stopping early when a page yields no article links.

    Args:
        category_slug: Category path segment, e.g. ``"xa-hoi"``.
        max_pages: Maximum number of listing pages to visit.

    Returns:
        List of dicts with keys ``url``, ``category`` (display name, falling
        back to the slug itself for slugs not in CATEGORIES) and
        ``category_slug``. Duplicate URLs (e.g. pinned articles repeated on
        several listing pages) are returned only once.
    """
    # Hoisted out of the loop: identical headers on every request.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
    }
    # .get() fallback generalizes to slugs missing from CATEGORIES
    # (the original raised KeyError for unknown slugs).
    category_name = CATEGORIES.get(category_slug, category_slug)
    urls: list[dict] = []
    seen: set[str] = set()
    for page in range(1, max_pages + 1):
        page_url = f"{BASE_URL}/{category_slug}/trang-{page}.htm"
        try:
            response = requests.get(page_url, timeout=30, headers=headers)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")
            # Find article links; fall back to a looser selector for layouts
            # that omit the <article> wrapper.
            articles = soup.select("article.article-item h3.article-title a")
            if not articles:
                articles = soup.select("h3.article-title a")
            for article in articles:
                href = article.get("href", "")
                if not href:
                    continue
                full_url = href if href.startswith("http") else f"{BASE_URL}{href}"
                if full_url in seen:
                    # Pinned/featured articles repeat across pages; skip dupes.
                    continue
                seen.add(full_url)
                urls.append({
                    "url": full_url,
                    "category": category_name,
                    "category_slug": category_slug,
                })
            print(f" Page {page}: found {len(articles)} articles")
            if not articles:
                break  # ran past the last listing page
            time.sleep(random.uniform(0.5, 1.5))  # polite crawl delay
        except Exception as e:
            # Best-effort: log and try the next page rather than aborting.
            print(f" Error on page {page}: {e}")
            continue
    return urls
def scrape_article(url: str) -> dict | None:
    """Download one Dan Tri article page and extract its text fields.

    Args:
        url: Absolute URL of the article.

    Returns:
        Dict with ``title``, ``description``, ``content`` and
        ``publish_date``, or ``None`` when the request fails or the page
        lacks body text or a publish date.
    """
    try:
        resp = requests.get(
            url,
            timeout=30,
            headers={
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
            },
        )
        resp.raise_for_status()
        page = BeautifulSoup(resp.text, "html.parser")

        def first_match(*selectors):
            # First truthy element matching any selector, tried in order
            # (regular article layout first, then e-magazine layout).
            for sel in selectors:
                node = page.select_one(sel)
                if node:
                    return node
            return None

        title_node = first_match("h1.title-page", "h1.e-magazine__title")
        title = title_node.get_text(strip=True) if title_node else ""

        desc_node = first_match("h2.singular-sapo", "div.singular-sapo")
        description = desc_node.get_text(strip=True) if desc_node else ""

        body_node = first_match("div.singular-content", "div.e-magazine__body")
        if body_node:
            texts = (p.get_text(strip=True) for p in body_node.select("p"))
            content = "\n\n".join(t for t in texts if t)
        else:
            content = ""

        date_node = page.select_one("time.author-time")
        publish_date = date_node.get_text(strip=True) if date_node else ""

        # Pages without body text or a publish date are treated as unusable.
        if not content or not publish_date:
            return None
        return {
            "title": title,
            "description": description,
            "content": content,
            "publish_date": publish_date,
        }
    except Exception as e:
        print(f" Error scraping {url}: {e}")
        return None
# Stop once this many articles have been collected across all categories.
TARGET_TOTAL = 1000
def main():
    """Scrape up to TARGET_TOTAL articles across all categories and save them to one JSON file."""
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    all_articles = []
    # Aim slightly above an even split so failed scrapes don't leave us short.
    target_per_category = TARGET_TOTAL // len(CATEGORIES) + 50  # Buffer
    for category_slug, category_name in CATEGORIES.items():
        print(f"\nScraping category: {category_name}")
        # Get article URLs
        article_urls = get_article_urls(category_slug)
        print(f" Found {len(article_urls)} article URLs")
        # Scrape each article
        count = 0
        for item in article_urls:
            if count >= target_per_category:
                break
            article = scrape_article(item["url"])
            if article:
                # Attach provenance metadata from the listing entry.
                article["source"] = "dantri"
                article["url"] = item["url"]
                article["category"] = item["category"]
                all_articles.append(article)
                count += 1
                if count % 50 == 0:
                    print(f" Scraped {count} articles")
            # Polite delay between article requests.
            # NOTE(review): indentation was reconstructed — assumed the sleep
            # runs for every URL, not only successful scrapes; confirm.
            time.sleep(random.uniform(0.3, 0.8))
        print(f" Total scraped for {category_name}: {count}")
        if len(all_articles) >= TARGET_TOTAL:
            break  # overall target reached; remaining categories are skipped
    # Save to JSON (truncated to TARGET_TOTAL; the per-category buffer can overshoot)
    output_file = OUTPUT_DIR / "dantri_articles.json"
    with open(output_file, "w", encoding="utf-8") as f:
        json.dump(all_articles[:TARGET_TOTAL], f, ensure_ascii=False, indent=2)
    print(f"\nTotal articles: {min(len(all_articles), TARGET_TOTAL)}")
    print(f"Saved to: {output_file}")
# Script entry point: run the full scrape when executed directly.
if __name__ == "__main__":
    main()