rain1024 committed on
Commit 5f1fd73 · verified · 1 Parent(s): 6b603d9

Upload scripts/scrape_tuoitre.py with huggingface_hub
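
The commit message indicates the file was pushed with the huggingface_hub client. A minimal sketch of such an upload (the repo id below is a placeholder, not taken from this commit) might look like:

from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` by default
api.upload_file(
    path_or_fileobj="scripts/scrape_tuoitre.py",
    path_in_repo="scripts/scrape_tuoitre.py",
    repo_id="rain1024/<dataset-repo>",  # placeholder; the target repo is not named in this commit
    repo_type="dataset",                # assumption: the target is a dataset repo
    commit_message="Upload scripts/scrape_tuoitre.py with huggingface_hub",
)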

Files changed (1)
  1. scripts/scrape_tuoitre.py +175 -0
scripts/scrape_tuoitre.py ADDED
@@ -0,0 +1,175 @@
+"""
+Scrape news articles from Tuoi Tre (https://tuoitre.vn)
+"""
+
+import json
+import time
+import random
+import requests
+from bs4 import BeautifulSoup
+from pathlib import Path
+
+# Tuoi Tre categories
+CATEGORIES = {
+    "thoi-su": "Thời sự",
+    "the-gioi": "Thế giới",
+    "kinh-doanh": "Kinh doanh",
+    "giai-tri": "Giải trí",
+    "the-thao": "Thể thao",
+    "phap-luat": "Pháp luật",
+    "giao-duc": "Giáo dục",
+    "suc-khoe": "Sức khỏe",
+    "xe": "Xe",
+    "nhip-song-tre": "Đời sống",
+}
+
+BASE_URL = "https://tuoitre.vn"
+OUTPUT_DIR = Path(__file__).parent.parent / "data" / "tuoitre"
+TARGET_TOTAL = 1000
+
+
+def get_article_urls(category_slug: str, max_pages: int = 100) -> list[dict]:
+    """Get article URLs from a category."""
+    urls = []
+
+    for page in range(1, max_pages + 1):
+        url = f"{BASE_URL}/{category_slug}/trang-{page}.htm"
+
+        try:
+            response = requests.get(url, timeout=30, headers={
+                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
+            })
+            response.raise_for_status()
+            soup = BeautifulSoup(response.text, "html.parser")
+
+            # Find article links
+            articles = soup.select("h3.box-title-text a")
+            if not articles:
+                articles = soup.select("a.box-category-link-title")
+
+            for article in articles:
+                href = article.get("href", "")
+                if href and ".htm" in href:
+                    full_url = href if href.startswith("http") else f"{BASE_URL}{href}"
+                    if full_url not in [u["url"] for u in urls]:
+                        urls.append({
+                            "url": full_url,
+                            "category": CATEGORIES[category_slug],
+                            "category_slug": category_slug
+                        })
+
+            print(f"  Page {page}: found {len(articles)} articles, total unique: {len(urls)}")
+
+            if not articles or len(urls) >= TARGET_TOTAL * 2:
+                break
+
+            time.sleep(random.uniform(0.5, 1.5))
+
+        except Exception as e:
+            print(f"  Error on page {page}: {e}")
+            continue
+
+    return urls
+
+
+def scrape_article(url: str) -> dict | None:
+    """Scrape content from a single article."""
+    try:
+        response = requests.get(url, timeout=30, headers={
+            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
+        })
+        response.raise_for_status()
+        soup = BeautifulSoup(response.text, "html.parser")
+
+        # Get title
+        title_elem = soup.select_one("h1.detail-title")
+        if not title_elem:
+            title_elem = soup.select_one("h1.article-title")
+        title = title_elem.get_text(strip=True) if title_elem else ""
+
+        # Get description
+        desc_elem = soup.select_one("h2.detail-sapo")
+        if not desc_elem:
+            desc_elem = soup.select_one("p.detail-sapo")
+        description = desc_elem.get_text(strip=True) if desc_elem else ""
+
+        # Get content
+        content_elem = soup.select_one("div.detail-content")
+        if not content_elem:
+            content_elem = soup.select_one("div#main-detail-body")
+
+        if content_elem:
+            paragraphs = content_elem.select("p")
+            content = "\n\n".join(p.get_text(strip=True) for p in paragraphs if p.get_text(strip=True))
+        else:
+            content = ""
+
+        # Get publish date
+        date_elem = soup.select_one("div.detail-time")
+        if not date_elem:
+            date_elem = soup.select_one("span.date-time")
+        publish_date = date_elem.get_text(strip=True) if date_elem else ""
+
+        if not content or len(content) < 100 or not publish_date:
+            return None
+
+        return {
+            "title": title,
+            "description": description,
+            "content": content,
+            "publish_date": publish_date
+        }
+
+    except Exception as e:
+        print(f"  Error scraping {url}: {e}")
+        return None
+
+
+def main():
+    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
+
+    all_articles = []
+    urls_per_category = TARGET_TOTAL // len(CATEGORIES) + 50
+
+    for category_slug, category_name in CATEGORIES.items():
+        print(f"\nScraping category: {category_name}")
+
+        # Get article URLs
+        article_urls = get_article_urls(category_slug)
+        print(f"  Found {len(article_urls)} article URLs")
+
+        # Scrape each article
+        count = 0
+        for item in article_urls:
+            if count >= urls_per_category:
+                break
+
+            article = scrape_article(item["url"])
+            if article:
+                article["source"] = "tuoitre"
+                article["url"] = item["url"]
+                article["category"] = item["category"]
+                all_articles.append(article)
+                count += 1
+
+                if count % 50 == 0:
+                    print(f"  Scraped {count} articles")
+
+            time.sleep(random.uniform(0.3, 0.8))
+
+        print(f"  Total scraped for {category_name}: {count}")
+
+        if len(all_articles) >= TARGET_TOTAL:
+            break
+
+    # Save to JSON
+    output_file = OUTPUT_DIR / "tuoitre_articles.json"
+    with open(output_file, "w", encoding="utf-8") as f:
+        json.dump(all_articles[:TARGET_TOTAL], f, ensure_ascii=False, indent=2)
+
+    print(f"\nTotal articles: {min(len(all_articles), TARGET_TOTAL)}")
+    print(f"Saved to: {output_file}")
+
+
+if __name__ == "__main__":
+    main()
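
The script writes its results to data/tuoitre/tuoitre_articles.json (OUTPUT_DIR above). A quick sanity check of that output, as a sketch assuming the script was run from the repository root:

import json
from pathlib import Path

output_file = Path("data") / "tuoitre" / "tuoitre_articles.json"
with open(output_file, encoding="utf-8") as f:
    articles = json.load(f)

# The script caps the dump at TARGET_TOTAL (1000) articles.
print(f"articles: {len(articles)}")
# Each record carries the scraped fields plus source/url/category added in main().
print(sorted(articles[0].keys()))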