|
|
""" |
|
|
Quick scraper - Get 100 articles from each source for testing. |
|
|
""" |
|
|
|
|
|
import json |
|
|
import time |
|
|
import random |
|
|
import requests |
|
|
from bs4 import BeautifulSoup |
|
|
from pathlib import Path |
|
|
|
|
|
# Destination directory for scraped data: <repo_root>/data (sibling of this script's parent).
OUTPUT_DIR = Path(__file__).parent.parent / "data"
|
|
|
|
|
|
|
|
# VnExpress categories to scrape.
# Key: URL slug (appended to https://vnexpress.net/<slug>); value: human-readable
# Vietnamese category name stored in each article record.
VNEXPRESS_CATEGORIES = {
    "thoi-su": "Thời sự",
    "the-gioi": "Thế giới",
    "kinh-doanh": "Kinh doanh",
    "giai-tri": "Giải trí",
    "the-thao": "Thể thao",
}
|
|
|
|
|
|
|
|
# Dan Tri categories to scrape.
# Key: URL slug (used as https://dantri.com.vn/<slug>.htm); value: human-readable
# Vietnamese category name stored in each article record.
DANTRI_CATEGORIES = {
    "xa-hoi": "Xã hội",
    "the-gioi": "Thế giới",
    "kinh-doanh": "Kinh doanh",
    "giai-tri": "Giải trí",
    "the-thao": "Thể thao",
}
|
|
|
|
|
# Browser-like User-Agent for all requests; some news sites block the default
# python-requests UA.
HEADERS = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"}
|
|
|
|
|
|
|
|
def scrape_vnexpress_article(url: str) -> dict | None:
    """Fetch and parse a single VnExpress article page.

    Args:
        url: Absolute URL of the article.

    Returns:
        Dict with "title", "description", and "content" keys, or None when
        the HTTP request fails or no article body text could be extracted.
    """
    try:
        response = requests.get(url, timeout=30, headers=HEADERS)
        # Don't parse error pages (404/5xx) as if they were articles.
        response.raise_for_status()
    except requests.RequestException:
        return None

    soup = BeautifulSoup(response.text, "html.parser")

    title = soup.select_one("h1.title-detail")
    title = title.get_text(strip=True) if title else ""

    desc = soup.select_one("p.description")
    desc = desc.get_text(strip=True) if desc else ""

    # VnExpress body paragraphs carry the "Normal" class inside article.fck_detail.
    content_elem = soup.select_one("article.fck_detail")
    if content_elem:
        paragraphs = content_elem.select("p.Normal")
        content = "\n\n".join(p.get_text(strip=True) for p in paragraphs if p.get_text(strip=True))
    else:
        content = ""

    # An article with no body text is useless for the dataset.
    if not content:
        return None

    return {"title": title, "description": desc, "content": content}
|
|
|
|
|
|
|
|
def scrape_dantri_article(url: str) -> dict | None:
    """Fetch and parse a single Dan Tri article page.

    Handles both regular articles and "e-magazine" layouts, which use
    different CSS classes for the title/sapo/body.

    Args:
        url: Absolute URL of the article.

    Returns:
        Dict with "title", "description", and "content" keys, or None when
        the HTTP request fails or no article body text could be extracted.
    """
    try:
        response = requests.get(url, timeout=30, headers=HEADERS)
        # Don't parse error pages (404/5xx) as if they were articles.
        response.raise_for_status()
    except requests.RequestException:
        return None

    soup = BeautifulSoup(response.text, "html.parser")

    title = soup.select_one("h1.title-page") or soup.select_one("h1.e-magazine__title")
    title = title.get_text(strip=True) if title else ""

    desc = soup.select_one("h2.singular-sapo") or soup.select_one("div.singular-sapo")
    desc = desc.get_text(strip=True) if desc else ""

    content_elem = soup.select_one("div.singular-content") or soup.select_one("div.e-magazine__body")
    if content_elem:
        paragraphs = content_elem.select("p")
        content = "\n\n".join(p.get_text(strip=True) for p in paragraphs if p.get_text(strip=True))
    else:
        content = ""

    # An article with no body text is useless for the dataset.
    if not content:
        return None

    return {"title": title, "description": desc, "content": content}
|
|
|
|
|
|
|
|
def get_vnexpress_urls(category: str, max_articles: int = 25) -> list[str]:
    """Collect article URLs from a VnExpress category listing page.

    Args:
        category: Category slug, e.g. "thoi-su".
        max_articles: Maximum number of URLs to return.

    Returns:
        Up to max_articles absolute article URLs; empty list on any
        request failure (best-effort by design).
    """
    urls: list[str] = []
    url = f"https://vnexpress.net/{category}"
    try:
        response = requests.get(url, timeout=30, headers=HEADERS)
        soup = BeautifulSoup(response.text, "html.parser")
        articles = soup.select("article.item-news h3.title-news a")
        for a in articles[:max_articles]:
            href = a.get("href", "")
            # Keep only links to the main site (drops video.vnexpress.net etc.,
            # and empty hrefs).
            if href.startswith("https://vnexpress.net"):
                urls.append(href)
    except requests.RequestException:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; listing failures stay best-effort.
        pass
    return urls
|
|
|
|
|
|
|
|
def get_dantri_urls(category: str, max_articles: int = 25) -> list[str]:
    """Collect article URLs from a Dan Tri category listing page.

    Args:
        category: Category slug, e.g. "xa-hoi".
        max_articles: Maximum number of URLs to return.

    Returns:
        Up to max_articles absolute article URLs; empty list on any
        request failure (best-effort by design).
    """
    urls: list[str] = []
    url = f"https://dantri.com.vn/{category}.htm"
    try:
        response = requests.get(url, timeout=30, headers=HEADERS)
        soup = BeautifulSoup(response.text, "html.parser")
        articles = soup.select("h3.article-title a")
        for a in articles[:max_articles]:
            href = a.get("href", "")
            # Skip empty hrefs: without this, "" turned into the bare
            # homepage URL "https://dantri.com.vn".
            if not href:
                continue
            # Listing pages use site-relative links; make them absolute.
            full_url = href if href.startswith("http") else f"https://dantri.com.vn{href}"
            urls.append(full_url)
    except requests.RequestException:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; listing failures stay best-effort.
        pass
    return urls
|
|
|
|
|
|
|
|
def _scrape_source(source_id: str, categories: dict[str, str], get_urls, scrape_article) -> list[dict]:
    """Scrape every category of one news source.

    Args:
        source_id: Tag stored in each record's "source" field (e.g. "vnexpress").
        categories: Mapping of category slug -> display name.
        get_urls: Callable(slug) -> list of article URLs.
        scrape_article: Callable(url) -> article dict or None.

    Returns:
        List of article dicts annotated with "source", "url", and "category".
    """
    articles = []
    for cat_slug, cat_name in categories.items():
        print(f" {cat_name}...", end=" ")
        count = 0
        for url in get_urls(cat_slug):
            article = scrape_article(url)
            if article:
                article["source"] = source_id
                article["url"] = url
                article["category"] = cat_name
                articles.append(article)
                count += 1
            # Polite random delay between requests to avoid hammering the site.
            time.sleep(random.uniform(0.2, 0.5))
        print(f"{count} articles")
    return articles


def main():
    """Scrape a small test batch of articles from both sources to JSON.

    Writes all collected articles to OUTPUT_DIR/all_articles.json
    (UTF-8, non-ASCII preserved) and prints per-category progress.
    """
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    all_articles = []

    # The two per-source loops were copy-pasted; factored into _scrape_source.
    print("=== Scraping VnExpress ===")
    all_articles += _scrape_source("vnexpress", VNEXPRESS_CATEGORIES, get_vnexpress_urls, scrape_vnexpress_article)

    print("\n=== Scraping Dan Tri ===")
    all_articles += _scrape_source("dantri", DANTRI_CATEGORIES, get_dantri_urls, scrape_dantri_article)

    output_file = OUTPUT_DIR / "all_articles.json"
    with open(output_file, "w", encoding="utf-8") as f:
        json.dump(all_articles, f, ensure_ascii=False, indent=2)

    print(f"\n=== Total: {len(all_articles)} articles ===")
    print(f"Saved to: {output_file}")
|
|
|
|
|
|
|
|
# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
|
|