"""
Scrape news articles from Tuoi Tre (https://tuoitre.vn).

Walks paginated category listings to collect article URLs, scrapes each
article's title, description, body text, and publish date, and saves up to
TARGET_TOTAL articles as JSON under data/tuoitre/.
"""
import json
import time
import random
import requests
from bs4 import BeautifulSoup
from pathlib import Path
# Tuoi Tre category slugs (used in listing URLs) mapped to display names
CATEGORIES = {
"thoi-su": "Thời sự",
"the-gioi": "Thế giới",
"kinh-doanh": "Kinh doanh",
"giai-tri": "Giải trí",
"the-thao": "Thể thao",
"phap-luat": "Pháp luật",
"giao-duc": "Giáo dục",
"suc-khoe": "Sức khỏe",
"xe": "Xe",
"nhip-song-tre": "Đời sống",
}
BASE_URL = "https://tuoitre.vn"
# Request headers shared by all fetches (browser-like User-Agent)
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
}
OUTPUT_DIR = Path(__file__).parent.parent / "data" / "tuoitre"
TARGET_TOTAL = 1000
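
# Category listings are paginated as BASE_URL/<slug>/trang-<N>.htm; pages are
# walked in order until one yields no links or enough URLs have been queued.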
def get_article_urls(category_slug: str, max_pages: int = 100) -> list[dict]:
"""Get article URLs from a category."""
    urls = []
    seen = set()  # URLs already queued, so membership checks stay O(1)
for page in range(1, max_pages + 1):
url = f"{BASE_URL}/{category_slug}/trang-{page}.htm"
try:
            response = requests.get(url, timeout=30, headers=HEADERS)
response.raise_for_status()
soup = BeautifulSoup(response.text, "html.parser")
            # Find article links; fall back to an alternate selector if the
            # primary one matches nothing
articles = soup.select("h3.box-title-text a")
if not articles:
articles = soup.select("a.box-category-link-title")
for article in articles:
href = article.get("href", "")
if href and ".htm" in href:
full_url = href if href.startswith("http") else f"{BASE_URL}{href}"
                    if full_url not in seen:
                        seen.add(full_url)
urls.append({
"url": full_url,
"category": CATEGORIES[category_slug],
"category_slug": category_slug
})
print(f" Page {page}: found {len(articles)} articles, total unique: {len(urls)}")
if not articles or len(urls) >= TARGET_TOTAL * 2:
break
time.sleep(random.uniform(0.5, 1.5))
except Exception as e:
print(f" Error on page {page}: {e}")
continue
return urls
def scrape_article(url: str) -> dict | None:
"""Scrape content from a single article."""
try:
        response = requests.get(url, timeout=30, headers=HEADERS)
response.raise_for_status()
soup = BeautifulSoup(response.text, "html.parser")
# Get title
title_elem = soup.select_one("h1.detail-title")
if not title_elem:
title_elem = soup.select_one("h1.article-title")
title = title_elem.get_text(strip=True) if title_elem else ""
# Get description
desc_elem = soup.select_one("h2.detail-sapo")
if not desc_elem:
desc_elem = soup.select_one("p.detail-sapo")
description = desc_elem.get_text(strip=True) if desc_elem else ""
# Get content
content_elem = soup.select_one("div.detail-content")
if not content_elem:
content_elem = soup.select_one("div#main-detail-body")
if content_elem:
paragraphs = content_elem.select("p")
content = "\n\n".join(p.get_text(strip=True) for p in paragraphs if p.get_text(strip=True))
else:
content = ""
# Get publish date
date_elem = soup.select_one("div.detail-time")
if not date_elem:
date_elem = soup.select_one("span.date-time")
publish_date = date_elem.get_text(strip=True) if date_elem else ""
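        # Quality filter: skip articles with little or no body text or no date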
if not content or len(content) < 100 or not publish_date:
return None
return {
"title": title,
"description": description,
"content": content,
"publish_date": publish_date
}
except Exception as e:
print(f" Error scraping {url}: {e}")
return None
def main():
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
all_articles = []
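    # Aim slightly above an even per-category split so that failed scrapes
    # do not leave the final dataset short of TARGET_TOTAL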
urls_per_category = TARGET_TOTAL // len(CATEGORIES) + 50
for category_slug, category_name in CATEGORIES.items():
print(f"\nScraping category: {category_name}")
# Get article URLs
article_urls = get_article_urls(category_slug)
print(f" Found {len(article_urls)} article URLs")
# Scrape each article
count = 0
for item in article_urls:
if count >= urls_per_category:
break
article = scrape_article(item["url"])
if article:
article["source"] = "tuoitre"
article["url"] = item["url"]
article["category"] = item["category"]
all_articles.append(article)
count += 1
if count % 50 == 0:
print(f" Scraped {count} articles")
time.sleep(random.uniform(0.3, 0.8))
print(f" Total scraped for {category_name}: {count}")
if len(all_articles) >= TARGET_TOTAL:
break
# Save to JSON
output_file = OUTPUT_DIR / "tuoitre_articles.json"
with open(output_file, "w", encoding="utf-8") as f:
json.dump(all_articles[:TARGET_TOTAL], f, ensure_ascii=False, indent=2)
print(f"\nTotal articles: {min(len(all_articles), TARGET_TOTAL)}")
print(f"Saved to: {output_file}")
if __name__ == "__main__":
main()
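
# A quick sanity check of the output (a sketch: the relative path assumes the
# script lives one directory below the repository root and that you run this
# from that root after scraping has completed):
#
#   import json
#   with open("data/tuoitre/tuoitre_articles.json", encoding="utf-8") as f:
#       articles = json.load(f)
#   print(len(articles), articles[0]["title"], articles[0]["category"])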