rain1024 committed on
Commit 110317f · verified · 1 Parent(s): 7b8c510

Upload scripts/scrape_nguoilaodong.py with huggingface_hub

Files changed (1)
  1. scripts/scrape_nguoilaodong.py +177 -0
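
The commit message notes the file was pushed with the huggingface_hub client. For reference, a minimal sketch of such an upload via HfApi.upload_file; the repo id and repo type below are hypothetical placeholders, not details of this commit:

    from huggingface_hub import HfApi

    # Hypothetical upload call; repo_id and repo_type are placeholders.
    api = HfApi()
    api.upload_file(
        path_or_fileobj="scripts/scrape_nguoilaodong.py",
        path_in_repo="scripts/scrape_nguoilaodong.py",
        repo_id="rain1024/<repo-name>",  # placeholder, not the actual repo
        repo_type="dataset",             # assumption: a dataset repo
        commit_message="Upload scripts/scrape_nguoilaodong.py with huggingface_hub",
    )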
scripts/scrape_nguoilaodong.py ADDED
@@ -0,0 +1,177 @@
+ """
+ Scrape news articles from Nguoi Lao Dong (https://nld.com.vn)
+ """
+
+ import json
+ import time
+ import random
+ import requests
+ from bs4 import BeautifulSoup
+ from pathlib import Path
+
+ # Nguoi Lao Dong categories
+ CATEGORIES = {
+     "thoi-su": "Thời sự",
+     "the-gioi": "Thế giới",
+     "kinh-te": "Kinh doanh",
+     "van-hoa-giai-tri": "Giải trí",
+     "the-thao": "Thể thao",
+     "phap-luat": "Pháp luật",
+     "giao-duc": "Giáo dục",
+     "suc-khoe": "Sức khỏe",
+     "cong-doan": "Công đoàn",
+     "cong-nghe": "Khoa học",
+ }
+
+ BASE_URL = "https://nld.com.vn"
+ OUTPUT_DIR = Path(__file__).parent.parent / "data" / "nguoilaodong"
+ TARGET_TOTAL = 1000
+
+
+ def get_article_urls(category_slug: str, max_pages: int = 100) -> list[dict]:
+     """Get article URLs from a category."""
+     urls = []
+
+     for page in range(1, max_pages + 1):
+         url = f"{BASE_URL}/{category_slug}/trang{page}.htm"
+
+         try:
+             response = requests.get(url, timeout=30, headers={
+                 "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
+             })
+             response.raise_for_status()
+             soup = BeautifulSoup(response.text, "html.parser")
+
+             # Find article links
+             articles = soup.select("h3.title-news a")
+             if not articles:
+                 articles = soup.select("h2.title-news a")
+             if not articles:
+                 articles = soup.select("a.title-news")
+
+             for article in articles:
+                 href = article.get("href", "")
+                 if href and ".htm" in href:
+                     full_url = href if href.startswith("http") else f"{BASE_URL}{href}"
+                     if full_url not in [u["url"] for u in urls]:
+                         urls.append({
+                             "url": full_url,
+                             "category": CATEGORIES[category_slug],
+                             "category_slug": category_slug
+                         })
+
+             print(f" Page {page}: found {len(articles)} articles, total unique: {len(urls)}")
+
+             if not articles or len(urls) >= TARGET_TOTAL * 2:
+                 break
+
+             time.sleep(random.uniform(0.5, 1.5))
+
+         except Exception as e:
+             print(f" Error on page {page}: {e}")
+             continue
+
+     return urls
+
+
+ def scrape_article(url: str) -> dict | None:
+     """Scrape content from a single article."""
+     try:
+         response = requests.get(url, timeout=30, headers={
+             "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"
+         })
+         response.raise_for_status()
+         soup = BeautifulSoup(response.text, "html.parser")
+
+         # Get title
+         title_elem = soup.select_one("h1.title-detail")
+         if not title_elem:
+             title_elem = soup.select_one("h1.detail-title")
+         title = title_elem.get_text(strip=True) if title_elem else ""
+
+         # Get description
+         desc_elem = soup.select_one("p.sapo-detail")
+         if not desc_elem:
+             desc_elem = soup.select_one("div.sapo-detail")
+         description = desc_elem.get_text(strip=True) if desc_elem else ""
+
+         # Get content
+         content_elem = soup.select_one("div.content-detail")
+         if not content_elem:
+             content_elem = soup.select_one("div.detail-content")
+
+         if content_elem:
+             paragraphs = content_elem.select("p")
+             content = "\n\n".join(p.get_text(strip=True) for p in paragraphs if p.get_text(strip=True))
+         else:
+             content = ""
+
+         # Get publish date
+         date_elem = soup.select_one("span.date-detail")
+         if not date_elem:
+             date_elem = soup.select_one("time.time-detail")
+         publish_date = date_elem.get_text(strip=True) if date_elem else ""
+
+         if not content or len(content) < 100 or not publish_date:
+             return None
+
+         return {
+             "title": title,
+             "description": description,
+             "content": content,
+             "publish_date": publish_date
+         }
+
+     except Exception as e:
+         print(f" Error scraping {url}: {e}")
+         return None
+
+
+ def main():
+     OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
+
+     all_articles = []
+     urls_per_category = TARGET_TOTAL // len(CATEGORIES) + 50
+
+     for category_slug, category_name in CATEGORIES.items():
+         print(f"\nScraping category: {category_name}")
+
+         # Get article URLs
+         article_urls = get_article_urls(category_slug)
+         print(f" Found {len(article_urls)} article URLs")
+
+         # Scrape each article
+         count = 0
+         for item in article_urls:
+             if count >= urls_per_category:
+                 break
+
+             article = scrape_article(item["url"])
+             if article:
+                 article["source"] = "nguoilaodong"
+                 article["url"] = item["url"]
+                 article["category"] = item["category"]
+                 all_articles.append(article)
+                 count += 1
+
+                 if count % 50 == 0:
+                     print(f" Scraped {count} articles")
+
+             time.sleep(random.uniform(0.3, 0.8))
+
+         print(f" Total scraped for {category_name}: {count}")
+
+         if len(all_articles) >= TARGET_TOTAL:
+             break
+
+     # Save to JSON
+     output_file = OUTPUT_DIR / "nguoilaodong_articles.json"
+     with open(output_file, "w", encoding="utf-8") as f:
+         json.dump(all_articles[:TARGET_TOTAL], f, ensure_ascii=False, indent=2)
+
+     print(f"\nTotal articles: {min(len(all_articles), TARGET_TOTAL)}")
+     print(f"Saved to: {output_file}")
+
+
+ if __name__ == "__main__":
+     main()
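
To run the script and sanity-check its output, something along these lines works; this is a usage sketch assuming the requests and beautifulsoup4 dependencies are installed, and is not part of the commit:

    # python scripts/scrape_nguoilaodong.py   (writes the JSON below)
    import json
    from collections import Counter
    from pathlib import Path

    output_file = Path("data/nguoilaodong/nguoilaodong_articles.json")
    articles = json.loads(output_file.read_text(encoding="utf-8"))

    print(f"Articles: {len(articles)}")               # capped at TARGET_TOTAL (1000)
    print(Counter(a["category"] for a in articles))   # per-category counts

    # Each record has the fields assembled in main():
    # title, description, content, publish_date, source, url, category

Note that scrape_article drops any page with fewer than 100 characters of content or no publish date, so per-category counts can fall short of urls_per_category.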