| | """ |
| | Scrape news articles from Tuoi Tre (https://tuoitre.vn) |
| | """ |
| |
|
| | import json |
| | import time |
| | import random |
| | import requests |
| | from bs4 import BeautifulSoup |
| | from pathlib import Path |
| |
|
| | |
# Category slugs (URL path segments on tuoitre.vn) mapped to their
# Vietnamese display names; the slug is interpolated into listing URLs.
CATEGORIES = {
    "thoi-su": "Thời sự",
    "the-gioi": "Thế giới",
    "kinh-doanh": "Kinh doanh",
    "giai-tri": "Giải trí",
    "the-thao": "Thể thao",
    "phap-luat": "Pháp luật",
    "giao-duc": "Giáo dục",
    "suc-khoe": "Sức khỏe",
    "xe": "Xe",
    "nhip-song-tre": "Đời sống",
}

BASE_URL = "https://tuoitre.vn"
# Output lives in <repo-root>/data/tuoitre, resolved relative to this file.
OUTPUT_DIR = Path(__file__).parent.parent / "data" / "tuoitre"
# Total number of articles to keep across all categories.
TARGET_TOTAL = 1000
| |
|
| |
|
def get_article_urls(category_slug: str, max_pages: int = 100) -> list[dict]:
    """Collect unique article URLs from a Tuoi Tre category listing.

    Walks paginated listing pages (``/<slug>/trang-<n>.htm``) until the
    listing runs dry, ``max_pages`` is reached, or twice the overall
    target has been gathered.

    Args:
        category_slug: Category path segment (a key of ``CATEGORIES``).
        max_pages: Maximum number of listing pages to fetch.

    Returns:
        List of dicts with keys ``url``, ``category``, ``category_slug``.
    """
    urls: list[dict] = []
    # Track already-seen URLs in a set: O(1) membership instead of
    # rebuilding and scanning a list of dicts per candidate (was O(n²)).
    seen: set[str] = set()

    for page in range(1, max_pages + 1):
        url = f"{BASE_URL}/{category_slug}/trang-{page}.htm"

        try:
            response = requests.get(url, timeout=30, headers={
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
            })
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            # Listing markup varies between layouts; try the primary
            # selector, then fall back to the alternate one.
            articles = soup.select("h3.box-title-text a")
            if not articles:
                articles = soup.select("a.box-category-link-title")

            for article in articles:
                href = article.get("href", "")
                if href and ".htm" in href:
                    full_url = href if href.startswith("http") else f"{BASE_URL}{href}"
                    if full_url not in seen:
                        seen.add(full_url)
                        urls.append({
                            "url": full_url,
                            "category": CATEGORIES[category_slug],
                            "category_slug": category_slug
                        })

            print(f" Page {page}: found {len(articles)} articles, total unique: {len(urls)}")

            # Stop when the listing is empty (past the last page) or we
            # already hold a comfortable surplus over the global target.
            if not articles or len(urls) >= TARGET_TOTAL * 2:
                break

            # Polite, jittered crawl delay between listing pages.
            time.sleep(random.uniform(0.5, 1.5))

        except Exception as e:
            # Best-effort crawl: log and move on to the next page.
            print(f" Error on page {page}: {e}")
            continue

    return urls
| |
|
| |
|
def scrape_article(url: str) -> dict | None:
    """Fetch one article page and extract its text fields.

    Args:
        url: Absolute URL of the article.

    Returns:
        Dict with ``title``, ``description``, ``content`` and
        ``publish_date``, or ``None`` when the fetch fails, the body is
        missing/too short, or no publish date was found.
    """
    try:
        resp = requests.get(url, timeout=30, headers={
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
        })
        resp.raise_for_status()
        page = BeautifulSoup(resp.text, "html.parser")

        # Each field has a primary and a fallback selector because the
        # site serves more than one article layout.
        title_elem = page.select_one("h1.detail-title") or page.select_one("h1.article-title")
        desc_elem = page.select_one("h2.detail-sapo") or page.select_one("p.detail-sapo")
        body_elem = page.select_one("div.detail-content") or page.select_one("div#main-detail-body")
        date_elem = page.select_one("div.detail-time") or page.select_one("span.date-time")

        title = title_elem.get_text(strip=True) if title_elem else ""
        description = desc_elem.get_text(strip=True) if desc_elem else ""
        publish_date = date_elem.get_text(strip=True) if date_elem else ""

        # Join non-empty paragraphs with blank lines between them.
        content = ""
        if body_elem:
            pieces = (p.get_text(strip=True) for p in body_elem.select("p"))
            content = "\n\n".join(piece for piece in pieces if piece)

        # Reject pages with no usable body or no publish date.
        if not content or len(content) < 100 or not publish_date:
            return None

        return {
            "title": title,
            "description": description,
            "content": content,
            "publish_date": publish_date
        }

    except Exception as e:
        # Best-effort scrape: log the failure and signal "skip" to caller.
        print(f" Error scraping {url}: {e}")
        return None
| |
|
| |
|
def main():
    """Crawl every configured category and save articles to one JSON file.

    Side effects: creates OUTPUT_DIR if needed, performs network requests
    via get_article_urls/scrape_article, and writes tuoitre_articles.json.
    """
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

    all_articles = []
    # Oversample each category slightly (+50) so failed scrapes do not
    # leave us short of TARGET_TOTAL overall.
    urls_per_category = TARGET_TOTAL // len(CATEGORIES) + 50

    for category_slug, category_name in CATEGORIES.items():
        print(f"\nScraping category: {category_name}")

        # Phase 1: gather candidate URLs for this category.
        article_urls = get_article_urls(category_slug)
        print(f" Found {len(article_urls)} article URLs")

        # Phase 2: fetch article bodies until the per-category cap.
        count = 0
        for item in article_urls:
            if count >= urls_per_category:
                break

            article = scrape_article(item["url"])
            if article:
                # Attach provenance fields from the URL record.
                article["source"] = "tuoitre"
                article["url"] = item["url"]
                article["category"] = item["category"]
                all_articles.append(article)
                count += 1

                # Progress heartbeat every 50 successful scrapes.
                if count % 50 == 0:
                    print(f" Scraped {count} articles")

            # Jittered delay between article fetches (politeness).
            time.sleep(random.uniform(0.3, 0.8))

        print(f" Total scraped for {category_name}: {count}")

        # Stop crawling further categories once the global target is met.
        if len(all_articles) >= TARGET_TOTAL:
            break

    # Persist at most TARGET_TOTAL articles as pretty-printed UTF-8 JSON.
    output_file = OUTPUT_DIR / "tuoitre_articles.json"
    with open(output_file, "w", encoding="utf-8") as f:
        json.dump(all_articles[:TARGET_TOTAL], f, ensure_ascii=False, indent=2)

    print(f"\nTotal articles: {min(len(all_articles), TARGET_TOTAL)}")
    print(f"Saved to: {output_file}")


if __name__ == "__main__":
    main()
| |
|