|
|
""" |
|
|
Run all news scrapers to collect 1000 articles from each of 6 Vietnamese newspapers. |
|
|
Total: 6000 articles |
|
|
|
|
|
Features: |
|
|
- 10 parallel workers |
|
|
- Progress display |
|
|
- Saves crawled URLs to avoid duplicates |
|
|
""" |
|
|
|
|
|
import json |
|
|
import time |
|
|
import random |
|
|
import requests |
|
|
from bs4 import BeautifulSoup |
|
|
from pathlib import Path |
|
|
from concurrent.futures import ThreadPoolExecutor, as_completed |
|
|
from threading import Lock |
|
|
import sys |
|
|
import urllib3 |
|
|
|
|
|
|
|
|
# Some target sites serve broken/expired TLS cert chains; every request below
# is made with verify=False, so silence the per-request InsecureRequestWarning.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


# All output lives under <repo>/data (sibling of this script's directory).
DATA_DIR = Path(__file__).parent.parent / "data"

# JSON list of every URL successfully scraped so far -- used to skip
# duplicates across runs.
CRAWLED_FILE = DATA_DIR / "crawled_urls.json"

# Number of articles to keep per newspaper.
TARGET_PER_SOURCE = 1000

# Thread-pool size for parallel article downloads.
NUM_WORKERS = 10


# Serializes writes to CRAWLED_FILE.
file_lock = Lock()

# Guards updates to the shared crawled-URL set from worker threads.
progress_lock = Lock()
|
|
|
|
|
|
|
|
# Per-source scraping configuration. Each entry describes one newspaper:
#   base_url           -- site root (no trailing slash)
#   categories         -- URL slug -> human-readable category label
#   page_url           -- callable(base, slug, page) -> listing-page URL
#   list_selector      -- CSS selectors (tried in order) for article links
#   title_selector     -- CSS selectors for the article headline
#   desc_selector      -- CSS selectors for the sapo / lead paragraph
#   content_selector   -- CSS selectors for the article body container
#   content_p_selector -- selector for paragraphs inside the body container
#   date_selector      -- CSS selectors for the publish date
NEWSPAPERS = {
    "vnexpress": {
        "base_url": "https://vnexpress.net",
        "categories": {
            "thoi-su": "Thời sự",
            "the-gioi": "Thế giới",
            "kinh-doanh": "Kinh doanh",
            "giai-tri": "Giải trí",
            "the-thao": "Thể thao",
            "phap-luat": "Pháp luật",
            "giao-duc": "Giáo dục",
            "suc-khoe": "Sức khỏe",
            "doi-song": "Đời sống",
            "khoa-hoc": "Khoa học",
        },
        # page 1 has no suffix; later pages append "-p<N>"
        "page_url": lambda base, cat, page: f"{base}/{cat}" if page == 1 else f"{base}/{cat}-p{page}",
        "list_selector": ["article.item-news h3.title-news a", "h3.title-news a"],
        "title_selector": ["h1.title-detail"],
        "desc_selector": ["p.description"],
        "content_selector": ["article.fck_detail"],
        "content_p_selector": "p.Normal",
        "date_selector": ["span.date"],
    },
    "dantri": {
        "base_url": "https://dantri.com.vn",
        "categories": {
            "xa-hoi": "Xã hội",
            "the-gioi": "Thế giới",
            "kinh-doanh": "Kinh doanh",
            "giai-tri": "Giải trí",
            "the-thao": "Thể thao",
            "phap-luat": "Pháp luật",
            "giao-duc": "Giáo dục",
            "suc-khoe": "Sức khỏe",
            "doi-song": "Đời sống",
            "khoa-hoc-cong-nghe": "Khoa học",
        },
        # every page, including page 1, uses "trang-<N>.htm"
        "page_url": lambda base, cat, page: f"{base}/{cat}/trang-{page}.htm",
        "list_selector": ["article.article-item h3.article-title a", "h3.article-title a"],
        "title_selector": ["h1.title-page", "h1.e-magazine__title"],
        "desc_selector": ["h2.singular-sapo", "div.singular-sapo"],
        "content_selector": ["div.singular-content", "div.e-magazine__body"],
        "content_p_selector": "p",
        "date_selector": ["time.author-time"],
    },
    "thanhnien": {
        "base_url": "https://thanhnien.vn",
        "categories": {
            "thoi-su": "Thời sự",
            "the-gioi": "Thế giới",
            "kinh-te": "Kinh doanh",
            "giai-tri": "Giải trí",
            "the-thao": "Thể thao",
            "phap-luat": "Pháp luật",
            "giao-duc": "Giáo dục",
            "suc-khoe": "Sức khỏe",
            "doi-song": "Đời sống",
            "cong-nghe": "Khoa học",
        },
        "page_url": lambda base, cat, page: f"{base}/{cat}.htm" if page == 1 else f"{base}/{cat}/trang-{page}.htm",
        "list_selector": ["a[title]", "h3 a", "h2 a"],
        "title_selector": ["h1.detail-title", "h1.details__headline", "h1"],
        "desc_selector": ["h2.detail-sapo", "div.detail-sapo", "p.sapo"],
        "content_selector": ["div.detail-content", "div.details__content", "div.content"],
        "content_p_selector": "p",
        "date_selector": ["div.detail-time", "time", "span.time"],
    },
    "tuoitre": {
        "base_url": "https://tuoitre.vn",
        "categories": {
            "thoi-su": "Thời sự",
            "the-gioi": "Thế giới",
            "kinh-doanh": "Kinh doanh",
            "giai-tri": "Giải trí",
            "the-thao": "Thể thao",
            "phap-luat": "Pháp luật",
            "giao-duc": "Giáo dục",
            "suc-khoe": "Sức khỏe",
            "xe": "Xe",
            "nhip-song-tre": "Đời sống",
        },
        "page_url": lambda base, cat, page: f"{base}/{cat}.htm" if page == 1 else f"{base}/{cat}/trang-{page}.htm",
        "list_selector": ["h3 a", "a.box-category-link-title", "a[title]"],
        "title_selector": ["h1.detail-title", "h1.article-title", "h1"],
        "desc_selector": ["h2.detail-sapo", "p.detail-sapo", "p.sapo"],
        "content_selector": ["div.detail-content", "div#main-detail-body", "div.content"],
        "content_p_selector": "p",
        "date_selector": ["div.detail-time", "span.date-time", "time"],
    },
    "tienphong": {
        "base_url": "https://tienphong.vn",
        "categories": {
            "xa-hoi": "Xã hội",
            "the-gioi": "Thế giới",
            "kinh-te": "Kinh doanh",
            "giai-tri": "Giải trí",
            "the-thao": "Thể thao",
            "phap-luat": "Pháp luật",
            "giao-duc": "Giáo dục",
            "suc-khoe": "Sức khỏe",
            "gioi-tre": "Đời sống",
            "cong-nghe": "Khoa học",
        },
        # tienphong uses ".html", unlike the ".htm" sites above
        "page_url": lambda base, cat, page: f"{base}/{cat}.html" if page == 1 else f"{base}/{cat}/trang-{page}.html",
        "list_selector": ["a[href*='post']", "h2 a", "h3 a"],
        "title_selector": ["h1.article__title", "h1.cms-title", "h1"],
        "desc_selector": ["h2.article__sapo", "div.article__sapo", "p.sapo"],
        "content_selector": ["div.article__body", "div.cms-body", "div.content"],
        "content_p_selector": "p",
        "date_selector": ["time.article__time", "span.cms-date", "time"],
    },
    "nguoilaodong": {
        "base_url": "https://nld.com.vn",
        "categories": {
            "thoi-su": "Thời sự",
            "the-gioi": "Thế giới",
            "kinh-te": "Kinh doanh",
            "van-hoa-giai-tri": "Giải trí",
            "the-thao": "Thể thao",
            "phap-luat": "Pháp luật",
            "giao-duc": "Giáo dục",
            "suc-khoe": "Sức khỏe",
            "cong-doan": "Công đoàn",
            "cong-nghe": "Khoa học",
        },
        # note: "trang<N>" with no dash, unlike the other sites
        "page_url": lambda base, cat, page: f"{base}/{cat}.htm" if page == 1 else f"{base}/{cat}/trang{page}.htm",
        "list_selector": ["h3 a", "h2 a", "a[title]"],
        "title_selector": ["h1.title-detail", "h1.detail-title", "h1"],
        "desc_selector": ["p.sapo-detail", "div.sapo-detail", "p.sapo"],
        "content_selector": ["div.content-detail", "div.detail-content", "div.content"],
        "content_p_selector": "p",
        "date_selector": ["span.date-detail", "time.time-detail", "time"],
    },
}
|
|
|
|
|
# Minimal desktop-browser User-Agent; without it several of the sites
# reject or throttle requests.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
}
|
|
|
|
|
|
|
|
def load_crawled_urls() -> set:
    """Load the set of previously crawled article URLs.

    Returns:
        Set of URL strings from CRAWLED_FILE, or an empty set when no
        crawl history exists yet.
    """
    if CRAWLED_FILE.exists():
        # Explicit UTF-8 for consistency with every other file operation in
        # this script (the article dumps are written with encoding="utf-8");
        # previously this relied on the platform's locale encoding.
        with open(CRAWLED_FILE, "r", encoding="utf-8") as f:
            return set(json.load(f))
    return set()
|
|
|
|
|
|
|
|
def save_crawled_urls(urls: set):
    """Persist the crawled-URL set to CRAWLED_FILE as a JSON list.

    Thread-safe: serialized via the module-level file_lock. Creates the
    data directory on first use.

    Args:
        urls: Set of URL strings to persist.
    """
    with file_lock:
        DATA_DIR.mkdir(parents=True, exist_ok=True)
        # Explicit UTF-8 + ensure_ascii=False for consistency with the
        # article dumps written in main(); sorted() makes the file
        # deterministic so diffs between runs are meaningful.
        with open(CRAWLED_FILE, "w", encoding="utf-8") as f:
            json.dump(sorted(urls), f, ensure_ascii=False)
|
|
|
|
|
|
|
|
def select_element(soup, selectors):
    """Return the first truthy match among *selectors*, or None.

    Selectors are tried in order against ``soup.select_one``; the first
    one that yields a (non-empty) element wins.
    """
    candidates = (soup.select_one(sel) for sel in selectors)
    return next((found for found in candidates if found), None)
|
|
|
|
|
|
|
|
def is_article_url(url: str, base_url: str) -> bool:
    """Heuristically decide whether *url* looks like an article page.

    A candidate must live under *base_url*, carry an article-style
    extension (.htm/.html/.tpo), and have a path that is either long
    (> 30 chars) or contains a digit (article IDs) -- this filters out
    short category/landing pages.
    """
    if not url:
        return False
    if not url.startswith(base_url):
        return False
    # ".htm" also matches ".html"; ".tpo" covers tienphong's legacy links.
    if not any(ext in url for ext in (".htm", ".html", ".tpo")):
        return False
    path = url.replace(base_url, "")
    if len(path) > 30:
        return True
    return any(ch.isdigit() for ch in path)
|
|
|
|
|
|
|
|
def get_article_urls(source: str, config: dict, crawled_urls: set) -> list[dict]:
    """Collect candidate article URLs from every category listing of one source.

    Walks up to 29 listing pages per category, stopping early when a page
    yields nothing new, an error occurs, or enough URLs have been gathered.

    Args:
        source: Key into NEWSPAPERS (e.g. "vnexpress").
        config: That source's scraper configuration.
        crawled_urls: URLs already scraped in previous runs (skipped).

    Returns:
        List of {"url", "category", "source"} dicts, deduplicated both
        within this call and against crawled_urls.
    """
    urls = []
    base_url = config["base_url"]
    seen_urls = set()  # dedupe within this run; crawled_urls covers past runs

    for cat_slug, cat_name in config["categories"].items():
        print(f" [{source}] Category: {cat_name}", flush=True)
        cat_urls = 0

        for page in range(1, 30):
            url = config["page_url"](base_url, cat_slug, page)

            try:
                # verify=False: some sites have broken cert chains
                # (warnings suppressed at module import).
                response = requests.get(url, timeout=30, headers=HEADERS, verify=False)
                if response.status_code != 200:
                    # Past the last page (or blocked) -- move to next category.
                    break
                soup = BeautifulSoup(response.text, "html.parser")

                page_urls = 0
                # Selectors are tried in priority order; seen_urls prevents
                # double-counting when multiple selectors match the same link.
                for selector in config["list_selector"]:
                    articles = soup.select(selector)
                    for article in articles:
                        href = article.get("href", "")
                        if href:
                            # Relative hrefs are resolved against the site root.
                            full_url = href if href.startswith("http") else f"{base_url}{href}"
                            if is_article_url(full_url, base_url) and full_url not in crawled_urls and full_url not in seen_urls:
                                seen_urls.add(full_url)
                                urls.append({
                                    "url": full_url,
                                    "category": cat_name,
                                    "source": source
                                })
                                page_urls += 1
                                cat_urls += 1

                if page_urls == 0:
                    # Page produced nothing new -- likely repeating content.
                    break

                # Over-collect 2x the target so downstream scrape failures
                # still leave enough articles.
                if len(urls) >= TARGET_PER_SOURCE * 2:
                    break

                # Polite crawl delay between listing pages.
                time.sleep(random.uniform(0.2, 0.5))

            except Exception as e:
                print(f" [{source}] Error page {page}: {e}", flush=True)
                break

        print(f" [{source}] {cat_name}: {cat_urls} URLs", flush=True)

        if len(urls) >= TARGET_PER_SOURCE * 2:
            break

    return urls
|
|
|
|
|
|
|
|
def scrape_article(item: dict, config: dict) -> dict | None:
    """Download and parse a single article page.

    Args:
        item: Dict with "url", "category" and "source" keys (as produced
            by get_article_urls).
        config: The source's scraper configuration (selector lists).

    Returns:
        Dict with source/url/category/title/description/content/
        publish_date, or None when the download fails or the parsed page
        looks like a non-article (too little text, or no date found).
    """
    url = item["url"]

    try:
        response = requests.get(url, timeout=30, headers=HEADERS, verify=False)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")

        # Headline (first matching selector wins; empty string if none match).
        title_elem = select_element(soup, config["title_selector"])
        title = title_elem.get_text(strip=True) if title_elem else ""

        # Sapo / lead paragraph.
        desc_elem = select_element(soup, config["desc_selector"])
        description = desc_elem.get_text(strip=True) if desc_elem else ""

        # Body text: non-empty paragraphs joined by blank lines.
        content_elem = select_element(soup, config["content_selector"])
        if content_elem:
            paragraphs = content_elem.select(config["content_p_selector"])
            content = "\n\n".join(p.get_text(strip=True) for p in paragraphs if p.get_text(strip=True))
        else:
            content = ""

        # Publish date, kept as the site's raw display text (not normalized).
        date_elem = select_element(soup, config["date_selector"])
        publish_date = date_elem.get_text(strip=True) if date_elem else ""

        # Quality gate: drop stubs, videos/galleries and undated pages.
        if not content or len(content) < 100 or not publish_date:
            return None

        return {
            "source": item["source"],
            "url": url,
            "category": item["category"],
            "title": title,
            "description": description,
            "content": content,
            "publish_date": publish_date
        }

    except Exception:
        # Network/parse failures are expected at scale; the caller simply
        # treats None as a skipped article.
        return None
|
|
|
|
|
|
|
|
def scrape_source(source: str, config: dict, crawled_urls: set, all_crawled: set):
    """Scrape up to TARGET_PER_SOURCE articles from one newspaper.

    Lists candidate URLs, then downloads them with a thread pool, stopping
    as soon as the target count is reached.

    Args:
        source: Key into NEWSPAPERS (e.g. "vnexpress").
        config: That source's scraper configuration.
        crawled_urls: URLs scraped in previous runs (skipped when listing).
        all_crawled: Shared set; every successfully scraped URL is added
            (guarded by progress_lock).

    Returns:
        List of article dicts, at most TARGET_PER_SOURCE long.
    """
    print(f"\n{'='*60}")
    print(f"[{source.upper()}] Starting...")
    print("="*60)

    print(f"[{source}] Fetching article URLs...", flush=True)
    article_urls = get_article_urls(source, config, crawled_urls)
    print(f"[{source}] Found {len(article_urls)} new URLs")

    if not article_urls:
        return []

    articles = []
    completed = 0

    with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
        futures = {executor.submit(scrape_article, item, config): item for item in article_urls}

        for future in as_completed(futures):
            item = futures[future]
            completed += 1

            try:
                article = future.result()
                if article:
                    articles.append(article)
                    with progress_lock:
                        all_crawled.add(item["url"])

                    if len(articles) % 10 == 0:
                        print(f"[{source}] Progress: {len(articles)}/{TARGET_PER_SOURCE} articles | Processed: {completed}/{len(article_urls)}", flush=True)

                    if len(articles) >= TARGET_PER_SOURCE:
                        # BUGFIX: a bare `break` here left every queued future
                        # running -- the `with` block's implicit shutdown waits
                        # for ALL pending downloads, so hitting the target did
                        # not actually stop the crawl. Cancel what hasn't
                        # started yet and return without waiting.
                        executor.shutdown(wait=False, cancel_futures=True)
                        break
            except Exception:
                # scrape_article swallows its own errors and returns None, so
                # a raising future is rare; skip it and keep consuming.
                continue

    print(f"[{source}] Completed: {len(articles)} articles")
    return articles[:TARGET_PER_SOURCE]
|
|
|
|
|
|
|
|
def main():
    """Run every configured scraper sequentially and write the JSON dumps."""
    print("="*60)
    print("UVN Dataset Scraper")
    print(f"Target: 6 newspapers x {TARGET_PER_SOURCE} articles = {6*TARGET_PER_SOURCE} total")
    print(f"Workers: {NUM_WORKERS}")
    print("="*60)

    crawled_urls = load_crawled_urls()
    print(f"Previously crawled URLs: {len(crawled_urls)}")

    # NOTE(review): all_crawled grows during this run, but the fixed
    # crawled_urls set is what get_article_urls filters against -- URLs found
    # earlier in the SAME run are not skipped across sources. Harmless in
    # practice since each source uses its own domain; confirm if sources ever
    # share URLs.
    all_crawled = set(crawled_urls)
    all_articles = []

    for source, config in NEWSPAPERS.items():
        try:
            articles = scrape_source(source, config, crawled_urls, all_crawled)
            all_articles.extend(articles)

            # Checkpoint crawl history after every source so an interrupted
            # run does not redo the finished ones.
            save_crawled_urls(all_crawled)

            # Per-source dump: data/<source>/<source>_articles.json
            source_dir = DATA_DIR / source
            source_dir.mkdir(parents=True, exist_ok=True)
            output_file = source_dir / f"{source}_articles.json"
            with open(output_file, "w", encoding="utf-8") as f:
                json.dump(articles, f, ensure_ascii=False, indent=2)
            print(f"[{source}] Saved to: {output_file}")

        except KeyboardInterrupt:
            # Ctrl-C: persist progress, then stop scraping further sources.
            print("\nStopped by user. Saving progress...")
            save_crawled_urls(all_crawled)
            break
        except Exception as e:
            # One broken source must not abort the remaining ones.
            print(f"[{source}] Error: {e}")
            continue

    # Combined dump of everything gathered in this run.
    all_file = DATA_DIR / "all_articles.json"
    with open(all_file, "w", encoding="utf-8") as f:
        json.dump(all_articles, f, ensure_ascii=False, indent=2)

    print("\n" + "="*60)
    print("SUMMARY")
    print("="*60)
    print(f"Total articles: {len(all_articles)}")
    print(f"Total crawled URLs: {len(all_crawled)}")
    print(f"Saved to: {all_file}")
|
|
|
|
|
|
|
|
# Script entry point -- only run the scraper when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|
|
|