Datasets:
File size: 5,337 Bytes
698c8cc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 |
"""
Quick scraper - Get 100 articles from each source for testing.
"""
import json
import time
import random
import requests
from bs4 import BeautifulSoup
from pathlib import Path
# Output directory for scraped data: <repo>/data, one level above this script.
OUTPUT_DIR = Path(__file__).parent.parent / "data"

# VnExpress categories: URL slug -> human-readable category name (Vietnamese).
# Slugs are appended to https://vnexpress.net/ to form listing-page URLs.
VNEXPRESS_CATEGORIES = {
    "thoi-su": "Thời sự",
    "the-gioi": "Thế giới",
    "kinh-doanh": "Kinh doanh",
    "giai-tri": "Giải trí",
    "the-thao": "Thể thao",
}

# Dan Tri categories: URL slug -> human-readable category name (Vietnamese).
# Slugs are formatted as https://dantri.com.vn/<slug>.htm for listing pages.
DANTRI_CATEGORIES = {
    "xa-hoi": "Xã hội",
    "the-gioi": "Thế giới",
    "kinh-doanh": "Kinh doanh",
    "giai-tri": "Giải trí",
    "the-thao": "Thể thao",
}

# Browser-like User-Agent so the sites serve their regular HTML pages.
HEADERS = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"}
def scrape_vnexpress_article(url: str) -> dict | None:
    """Fetch and parse a single VnExpress article page.

    Args:
        url: Absolute article URL on vnexpress.net.

    Returns:
        A dict with "title", "description" and "content" keys, or None
        when the request fails, the server returns an error status, or
        no body text could be extracted.
    """
    try:
        response = requests.get(url, timeout=30, headers=HEADERS)
        # Without this, a 404/500 error page would be parsed as an article.
        response.raise_for_status()
    except requests.RequestException:
        # Best-effort scraper: a failed fetch just skips the article.
        return None

    soup = BeautifulSoup(response.text, "html.parser")

    title_elem = soup.select_one("h1.title-detail")
    title = title_elem.get_text(strip=True) if title_elem else ""

    desc_elem = soup.select_one("p.description")
    desc = desc_elem.get_text(strip=True) if desc_elem else ""

    content = ""
    content_elem = soup.select_one("article.fck_detail")
    if content_elem:
        # Body paragraphs carry the "Normal" class; drop empty ones.
        content = "\n\n".join(
            text
            for p in content_elem.select("p.Normal")
            if (text := p.get_text(strip=True))
        )

    # An article with no body text is useless for the dataset.
    if not content:
        return None
    return {"title": title, "description": desc, "content": content}
def scrape_dantri_article(url: str) -> dict | None:
    """Fetch and parse a single Dan Tri article page.

    Handles both the regular article layout and the "e-magazine"
    long-form layout (different CSS classes for title/sapo/body).

    Args:
        url: Absolute article URL on dantri.com.vn.

    Returns:
        A dict with "title", "description" and "content" keys, or None
        when the request fails, the server returns an error status, or
        no body text could be extracted.
    """
    try:
        response = requests.get(url, timeout=30, headers=HEADERS)
        # Without this, a 404/500 error page would be parsed as an article.
        response.raise_for_status()
    except requests.RequestException:
        # Best-effort scraper: a failed fetch just skips the article.
        return None

    soup = BeautifulSoup(response.text, "html.parser")

    title_elem = soup.select_one("h1.title-page") or soup.select_one("h1.e-magazine__title")
    title = title_elem.get_text(strip=True) if title_elem else ""

    desc_elem = soup.select_one("h2.singular-sapo") or soup.select_one("div.singular-sapo")
    desc = desc_elem.get_text(strip=True) if desc_elem else ""

    content = ""
    content_elem = soup.select_one("div.singular-content") or soup.select_one("div.e-magazine__body")
    if content_elem:
        # Join all non-empty paragraphs of the article body.
        content = "\n\n".join(
            text
            for p in content_elem.select("p")
            if (text := p.get_text(strip=True))
        )

    # An article with no body text is useless for the dataset.
    if not content:
        return None
    return {"title": title, "description": desc, "content": content}
def get_vnexpress_urls(category: str, max_articles: int = 25) -> list[str]:
    """Collect article URLs from a VnExpress category landing page.

    Args:
        category: Category slug, e.g. "thoi-su".
        max_articles: Maximum number of links to return.

    Returns:
        Up to max_articles unique on-site article URLs; empty when the
        listing request fails.
    """
    urls: list[str] = []
    try:
        response = requests.get(f"https://vnexpress.net/{category}", timeout=30, headers=HEADERS)
        response.raise_for_status()
    except requests.RequestException:
        # Best-effort: a failed listing fetch simply yields no URLs.
        return urls

    soup = BeautifulSoup(response.text, "html.parser")
    seen: set[str] = set()
    for anchor in soup.select("article.item-news h3.title-news a"):
        href = anchor.get("href", "")
        # Keep only on-site links; skip duplicates (the same story can
        # appear in several teaser positions on the landing page).
        if href.startswith("https://vnexpress.net") and href not in seen:
            seen.add(href)
            urls.append(href)
            if len(urls) >= max_articles:
                break
    return urls
def get_dantri_urls(category: str, max_articles: int = 25) -> list[str]:
    """Collect article URLs from a Dan Tri category landing page.

    Args:
        category: Category slug, e.g. "xa-hoi".
        max_articles: Maximum number of links to return.

    Returns:
        Up to max_articles unique absolute article URLs; empty when the
        listing request fails.
    """
    urls: list[str] = []
    try:
        response = requests.get(f"https://dantri.com.vn/{category}.htm", timeout=30, headers=HEADERS)
        response.raise_for_status()
    except requests.RequestException:
        # Best-effort: a failed listing fetch simply yields no URLs.
        return urls

    soup = BeautifulSoup(response.text, "html.parser")
    seen: set[str] = set()
    for anchor in soup.select("h3.article-title a"):
        href = anchor.get("href", "")
        if not href:
            continue
        # Listing pages mix relative and absolute hrefs; normalize both.
        full_url = href if href.startswith("http") else f"https://dantri.com.vn{href}"
        if full_url not in seen:
            seen.add(full_url)
            urls.append(full_url)
            if len(urls) >= max_articles:
                break
    return urls
def _scrape_site(header, categories, get_urls, scrape, source):
    """Scrape every category of one site and return annotated article dicts.

    Args:
        header: Banner line printed before this site's progress output.
        categories: Mapping of category slug -> display name.
        get_urls: Callable(slug) -> list of article URLs.
        scrape: Callable(url) -> article dict or None.
        source: Source tag stored on each article (e.g. "vnexpress").

    Returns:
        List of article dicts, each tagged with source, url and category.
    """
    articles = []
    print(header)
    for cat_slug, cat_name in categories.items():
        print(f" {cat_name}...", end=" ")
        count = 0
        for url in get_urls(cat_slug):
            article = scrape(url)
            if article:
                article["source"] = source
                article["url"] = url
                article["category"] = cat_name
                articles.append(article)
                count += 1
            # Small randomized delay so we don't hammer the server.
            time.sleep(random.uniform(0.2, 0.5))
        print(f"{count} articles")
    return articles


def main():
    """Scrape both news sites and write all articles to one JSON file."""
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

    # The two sites share identical orchestration; only the URL lister
    # and article parser differ, so one helper handles both.
    all_articles = _scrape_site(
        "=== Scraping VnExpress ===",
        VNEXPRESS_CATEGORIES,
        get_vnexpress_urls,
        scrape_vnexpress_article,
        "vnexpress",
    )
    all_articles += _scrape_site(
        "\n=== Scraping Dan Tri ===",
        DANTRI_CATEGORIES,
        get_dantri_urls,
        scrape_dantri_article,
        "dantri",
    )

    # Persist everything as UTF-8 JSON (ensure_ascii=False keeps the
    # Vietnamese text readable in the output file).
    output_file = OUTPUT_DIR / "all_articles.json"
    with open(output_file, "w", encoding="utf-8") as f:
        json.dump(all_articles, f, ensure_ascii=False, indent=2)
    print(f"\n=== Total: {len(all_articles)} articles ===")
    print(f"Saved to: {output_file}")
if __name__ == "__main__":
main()
|