rain1024 commited on
Commit
698c8cc
·
verified ·
1 Parent(s): 71f15c8

Upload scripts/scrape_quick.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. scripts/scrape_quick.py +167 -0
scripts/scrape_quick.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Quick scraper - Get 100 articles from each source for testing.
"""

import json
import time
import random
import requests
from bs4 import BeautifulSoup
from pathlib import Path

# Output directory: <repo root>/data (scripts/ is one level below the root).
OUTPUT_DIR = Path(__file__).parent.parent / "data"

# VnExpress categories: URL slug -> human-readable Vietnamese category name.
VNEXPRESS_CATEGORIES = {
    "thoi-su": "Thời sự",
    "the-gioi": "Thế giới",
    "kinh-doanh": "Kinh doanh",
    "giai-tri": "Giải trí",
    "the-thao": "Thể thao",
}

# Dan Tri categories: URL slug -> human-readable Vietnamese category name.
DANTRI_CATEGORIES = {
    "xa-hoi": "Xã hội",
    "the-gioi": "Thế giới",
    "kinh-doanh": "Kinh doanh",
    "giai-tri": "Giải trí",
    "the-thao": "Thể thao",
}

# Browser-like User-Agent so the news sites serve full desktop pages.
HEADERS = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36"}
35
def scrape_vnexpress_article(url: str) -> dict | None:
    """Scrape a single VnExpress article page.

    Args:
        url: Absolute URL of the article.

    Returns:
        A dict with "title", "description" and "content" keys, or None when
        the request fails, the server returns an error status, or the page
        has no extractable body text.
    """
    try:
        response = requests.get(url, timeout=30, headers=HEADERS)
        # Don't parse error pages (404/5xx) as if they were articles.
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")

        title = soup.select_one("h1.title-detail")
        title = title.get_text(strip=True) if title else ""

        desc = soup.select_one("p.description")
        desc = desc.get_text(strip=True) if desc else ""

        content_elem = soup.select_one("article.fck_detail")
        if content_elem:
            paragraphs = content_elem.select("p.Normal")
            content = "\n\n".join(p.get_text(strip=True) for p in paragraphs if p.get_text(strip=True))
        else:
            content = ""

        # An article without body text is useless downstream; skip it.
        if not content:
            return None

        return {"title": title, "description": desc, "content": content}
    except Exception:
        # Best-effort scraping: any network/parse failure just drops this article.
        return None
+
61
def scrape_dantri_article(url: str) -> dict | None:
    """Scrape a single Dan Tri article page (regular or e-magazine layout).

    Args:
        url: Absolute URL of the article.

    Returns:
        A dict with "title", "description" and "content" keys, or None when
        the request fails, the server returns an error status, or the page
        has no extractable body text.
    """
    try:
        response = requests.get(url, timeout=30, headers=HEADERS)
        # Don't parse error pages (404/5xx) as if they were articles.
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")

        # Dan Tri uses different selectors for regular vs e-magazine articles.
        title = soup.select_one("h1.title-page") or soup.select_one("h1.e-magazine__title")
        title = title.get_text(strip=True) if title else ""

        desc = soup.select_one("h2.singular-sapo") or soup.select_one("div.singular-sapo")
        desc = desc.get_text(strip=True) if desc else ""

        content_elem = soup.select_one("div.singular-content") or soup.select_one("div.e-magazine__body")
        if content_elem:
            paragraphs = content_elem.select("p")
            content = "\n\n".join(p.get_text(strip=True) for p in paragraphs if p.get_text(strip=True))
        else:
            content = ""

        # An article without body text is useless downstream; skip it.
        if not content:
            return None

        return {"title": title, "description": desc, "content": content}
    except Exception:
        # Best-effort scraping: any network/parse failure just drops this article.
        return None
+
87
def get_vnexpress_urls(category: str, max_articles: int = 25) -> list[str]:
    """Collect article URLs from a VnExpress category landing page.

    Args:
        category: Category URL slug (e.g. "thoi-su").
        max_articles: Maximum number of URLs to return.

    Returns:
        Up to ``max_articles`` absolute article URLs; empty list on failure.
    """
    urls = []
    url = f"https://vnexpress.net/{category}"
    try:
        response = requests.get(url, timeout=30, headers=HEADERS)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")
        articles = soup.select("article.item-news h3.title-news a")
        for a in articles[:max_articles]:
            href = a.get("href", "")
            # Keep only canonical vnexpress.net links (skips ads/external links).
            if href.startswith("https://vnexpress.net"):
                urls.append(href)
    except Exception:
        # Best-effort: a failed category fetch yields no URLs rather than crashing.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
        pass
    return urls
+
103
def get_dantri_urls(category: str, max_articles: int = 25) -> list[str]:
    """Collect article URLs from a Dan Tri category landing page.

    Args:
        category: Category URL slug (e.g. "xa-hoi").
        max_articles: Maximum number of URLs to return.

    Returns:
        Up to ``max_articles`` absolute article URLs; empty list on failure.
    """
    urls = []
    url = f"https://dantri.com.vn/{category}.htm"
    try:
        response = requests.get(url, timeout=30, headers=HEADERS)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")
        articles = soup.select("h3.article-title a")
        for a in articles[:max_articles]:
            href = a.get("href", "")
            # Dan Tri mixes relative and absolute hrefs; normalize to absolute.
            full_url = href if href.startswith("http") else f"https://dantri.com.vn{href}"
            urls.append(full_url)
    except Exception:
        # Best-effort: a failed category fetch yields no URLs rather than crashing.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
        pass
    return urls
+
119
+ def main():
120
+ OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
121
+ all_articles = []
122
+
123
+ # Scrape VnExpress
124
+ print("=== Scraping VnExpress ===")
125
+ for cat_slug, cat_name in VNEXPRESS_CATEGORIES.items():
126
+ print(f" {cat_name}...", end=" ")
127
+ urls = get_vnexpress_urls(cat_slug)
128
+ count = 0
129
+ for url in urls:
130
+ article = scrape_vnexpress_article(url)
131
+ if article:
132
+ article["source"] = "vnexpress"
133
+ article["url"] = url
134
+ article["category"] = cat_name
135
+ all_articles.append(article)
136
+ count += 1
137
+ time.sleep(random.uniform(0.2, 0.5))
138
+ print(f"{count} articles")
139
+
140
+ # Scrape Dan Tri
141
+ print("\n=== Scraping Dan Tri ===")
142
+ for cat_slug, cat_name in DANTRI_CATEGORIES.items():
143
+ print(f" {cat_name}...", end=" ")
144
+ urls = get_dantri_urls(cat_slug)
145
+ count = 0
146
+ for url in urls:
147
+ article = scrape_dantri_article(url)
148
+ if article:
149
+ article["source"] = "dantri"
150
+ article["url"] = url
151
+ article["category"] = cat_name
152
+ all_articles.append(article)
153
+ count += 1
154
+ time.sleep(random.uniform(0.2, 0.5))
155
+ print(f"{count} articles")
156
+
157
+ # Save
158
+ output_file = OUTPUT_DIR / "all_articles.json"
159
+ with open(output_file, "w", encoding="utf-8") as f:
160
+ json.dump(all_articles, f, ensure_ascii=False, indent=2)
161
+
162
+ print(f"\n=== Total: {len(all_articles)} articles ===")
163
+ print(f"Saved to: {output_file}")
164
+
165
+
166
+ if __name__ == "__main__":
167
+ main()