import cloudscraper
from bs4 import BeautifulSoup
import time
import random
import logging
import re
from datetime import datetime
from typing import List, Dict, Optional, Callable
from urllib.parse import urljoin
from dataclasses import dataclass

logger = logging.getLogger(__name__)

@dataclass
class NewsItem:
    """新聞項目資料結構"""
    title: str
    content: str
    url: str
    source: str
    category: str
    published_date: datetime
    sentiment: Optional[str] = None
    sentiment_score: Optional[float] = None

class CnYesNewsCrawler:
    """鉅亨網新聞爬蟲 - 完全無限制版"""
    
    def __init__(self, sentiment_analyzer=None, database=None):
        self.base_url = "https://news.cnyes.com"
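        # cloudscraper provides a requests-compatible session that can solve
        # Cloudflare's anti-bot challenges, which a plain requests session
        # would typically fail.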
        self.session = cloudscraper.create_scraper(
            browser={
                'browser': 'chrome',
                'platform': 'windows',
                'mobile': False
            }
        )
        
        # Injected dependencies
        self.sentiment_analyzer = sentiment_analyzer
        self.database = database
        
        # Corrected news category URLs
        self.categories = {
            'us_stock': 'https://news.cnyes.com/news/cat/us_stock',      # US stocks
            'tw_stock': 'https://news.cnyes.com/news/cat/tw_stock_news'  # Taiwan stocks
        }
        
        # Progress callback
        self.progress_callback = None
        
        # Set up request headers
        self._setup_headers()
        
        logger.info("爬蟲初始化完成 - 無限制模式")
        logger.info(f"美股URL: {self.categories['us_stock']}")
        logger.info(f"台股URL: {self.categories['tw_stock']}")
        
    def set_progress_callback(self, callback: Callable[[str], None]):
        """設置進度回調函數"""
        self.progress_callback = callback
        
    def _notify_progress(self, message: str):
        """通知進度更新"""
        if self.progress_callback:
            self.progress_callback(message)
        logger.info(message)
        
    def _setup_headers(self):
        """設置更真實的請求頭"""
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
            'Accept-Language': 'zh-TW,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'DNT': '1',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Cache-Control': 'max-age=0',
            'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"'
        })
    
    def _get_page(self, url: str, retries: int = 3) -> Optional[BeautifulSoup]:
        """獲取網頁內容"""
        for attempt in range(retries):
            try:
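                # Randomized 3-8 s pause before every request to reduce the
                # chance of rate limiting or blocking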
                time.sleep(random.uniform(3, 8))
                
                user_agents = [
                    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
                    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
                    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
                    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:121.0) Gecko/20100101 Firefox/121.0'
                ]
                self.session.headers['User-Agent'] = random.choice(user_agents)
                
                logger.info(f"正在請求: {url}")
                response = self.session.get(url, timeout=30)
                
                if response.status_code == 200:
                    response.encoding = 'utf-8'
                    soup = BeautifulSoup(response.content, 'html.parser')
                    logger.info(f"成功獲取網頁: {url}")
                    return soup
                else:
                    logger.warning(f"HTTP {response.status_code} for {url}")
                    
            except Exception as e:
                logger.error(f"請求失敗 (嘗試 {attempt + 1}/{retries}): {e}")
                if attempt < retries - 1:
                    time.sleep(random.uniform(5, 15))
                    
        return None
    
    def _extract_article_urls(self, category_url: str, max_pages: int = 4) -> List[str]:
        """從分類頁面提取文章URL - 增加到4頁"""
        article_urls = []
        
        for page in range(1, max_pages + 1):
            try:
                if page == 1:
                    url = category_url
                else:
                    url = f"{category_url}?page={page}"
                
                self._notify_progress(f"🔍 爬取分類頁面 {page}: {url}")
                soup = self._get_page(url)
                
                if not soup:
                    continue
                
                link_selectors = [
                    'a[href*="/news/id/"]',
                    '.news-list a[href*="/news/id/"]',
                    '.list-item a[href*="/news/id/"]',
                    '.news-item a[href*="/news/id/"]',
                    'h3 a[href*="/news/id/"]',
                    '.title a[href*="/news/id/"]'
                ]
                
                page_urls = []
                links = []
                for selector in link_selectors:
                    links = soup.select(selector)
                    if links:
                        logger.info(f"使用選擇器 '{selector}' 找到 {len(links)} 個連結")
                        break
                
                for link in links:
                    href = link.get('href')
                    if href and '/news/id/' in href:
                        full_url = urljoin(self.base_url, href)
                        if full_url not in page_urls:
                            page_urls.append(full_url)
                
                article_urls.extend(page_urls)
                self._notify_progress(f"📄 第 {page} 頁找到 {len(page_urls)} 篇文章")
                
                if not page_urls:
                    logger.warning(f"第 {page} 頁沒有找到文章,停止爬取後續頁面")
                    break
                    
                if page < max_pages:
                    time.sleep(random.uniform(8, 15))
                    
            except Exception as e:
                logger.error(f"爬取第 {page} 頁時發生錯誤: {e}")
                continue
        
        unique_urls = list(set(article_urls))
        self._notify_progress(f"🎯 總共找到 {len(unique_urls)} 篇獨特文章")
        return unique_urls
    
    def _extract_article_content(self, url: str, category: str) -> Optional[NewsItem]:
        """提取文章詳細內容"""
        try:
            soup = self._get_page(url)
            if not soup:
                return None
            
            # Extract the title
            title_selectors = [
                'h1[class*="title"]',
                'h1.news-title',
                'h1.article-title',
                '.article-header h1',
                '.news-header h1',
                '.content-header h1',
                'h1',
                'h2[class*="title"]',
                '.title h1',
                '.title h2'
            ]
            
            title = ""
            for selector in title_selectors:
                title_elem = soup.select_one(selector)
                if title_elem:
                    title = title_elem.get_text(strip=True)
                    if title and len(title) > 10:
                        break
            
            if not title:
                page_title = soup.find('title')
                if page_title:
                    title = page_title.get_text(strip=True).split(' | ')[0]
            
            if not title or len(title) < 5:
                logger.warning(f"標題太短或無法提取: {url}")
                return None
            
            # Extract the body content
            content_selectors = [
                '.article-content',
                '.news-content', 
                '.content-body',
                '.article-body',
                '.news-body',
                '.post-content',
                '[class*="article-text"]',
                '[class*="content"]',
                '.article p',
                '.content p'
            ]
            
            content = ""
            for selector in content_selectors:
                content_container = soup.select_one(selector)
                if content_container:
                    for unwanted in content_container.select('script, style, .ad, .advertisement, .related, .share, .comment'):
                        unwanted.decompose()
                    
                    # Collect candidate text blocks; overlapping sentences are
                    # deduplicated later in _clean_content()
                    paragraphs = content_container.find_all(['p', 'div'])
                    content_parts = []
                    
                    for p in paragraphs:
                        text = p.get_text(strip=True)
                        if text and len(text) > 20 and not any(skip in text.lower() for skip in ['廣告', 'ad', 'advertisement', '分享', 'share']):
                            content_parts.append(text)
                    
                    content = '\n'.join(content_parts)
                    if len(content) > 100:
                        break
            
            if not content or len(content) < 50:
                logger.warning(f"內容太短或無法提取: {url}")
                return None
            
            # Extract the publish time
            published_date = self._extract_publish_date(soup)
            
            # Clean up the content
            content = self._clean_content(content)
            
            # Build the news item
            news_item = NewsItem(
                title=title,
                content=content[:2000],
                url=url,
                source='鉅亨網',
                category=category,
                published_date=published_date
            )
            
            return news_item
            
        except Exception as e:
            logger.error(f"提取文章內容時發生錯誤 {url}: {e}")
            return None
    
    def _clean_content(self, content: str) -> str:
        """清理內容"""
        content = re.sub(r'\s+', ' ', content)
        content = re.sub(r'[^\u4e00-\u9fff\u3400-\u4dbf\w\s.,!?()(),。!?:;「」『』]', '', content)
        
        sentences = content.split('。')
        unique_sentences = []
        for sentence in sentences:
            if sentence.strip() and sentence.strip() not in unique_sentences:
                unique_sentences.append(sentence.strip())
        
        return '。'.join(unique_sentences)
    
    def _extract_publish_date(self, soup: BeautifulSoup) -> datetime:
        """提取發布時間"""
        time_selectors = [
            'time[datetime]',
            '.publish-time',
            '.news-time',
            '.article-time',
            '[class*="time"]',
            '[class*="date"]',
            'meta[property="article:published_time"]',
            'meta[name="pubdate"]'
        ]
        
        for selector in time_selectors:
            time_elem = soup.select_one(selector)
            if time_elem:
                datetime_attr = time_elem.get('datetime') or time_elem.get('content')
                if datetime_attr:
                    try:
                        return datetime.fromisoformat(datetime_attr.replace('Z', '+00:00')).replace(tzinfo=None)
                    except Exception:
                        pass
                
                time_text = time_elem.get_text(strip=True)
                parsed_time = self._parse_time_text(time_text)
                if parsed_time:
                    return parsed_time
        
        return datetime.now()
    
    def _parse_time_text(self, time_text: str) -> Optional[datetime]:
        """解析時間文字"""
        patterns = [
            r'(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})',
            r'(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2})',
            r'(\d{4})/(\d{2})/(\d{2})\s+(\d{2}):(\d{2})',
            r'(\d{4})-(\d{2})-(\d{2})',
            r'(\d{4})年(\d{1,2})月(\d{1,2})日\s*(\d{1,2}):(\d{2})',
            r'(\d{4})年(\d{1,2})月(\d{1,2})日'
        ]
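        # e.g. "2024-01-15 09:30:00" -> datetime(2024, 1, 15, 9, 30, 0)
        #      "2024年1月15日"        -> datetime(2024, 1, 15)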
        
        for pattern in patterns:
            match = re.search(pattern, time_text)
            if match:
                try:
                    groups = match.groups()
                    if len(groups) >= 6:
                        return datetime(int(groups[0]), int(groups[1]), int(groups[2]),
                                      int(groups[3]), int(groups[4]), int(groups[5]))
                    elif len(groups) >= 5:
                        return datetime(int(groups[0]), int(groups[1]), int(groups[2]),
                                      int(groups[3]), int(groups[4]))
                    else:
                        return datetime(int(groups[0]), int(groups[1]), int(groups[2]))
                except Exception:
                    continue
        
        return None
    
    def crawl_category(self, category: str, unlimited: bool = True) -> List[NewsItem]:
        """爬取指定分類的新聞 - 完全無限制版"""
        if category not in self.categories:
            logger.error(f"無效的分類: {category}")
            return []
        
        category_name = "美股" if category == "us_stock" else "台股"
        mode_text = "無限制" if unlimited else "限制"
        self._notify_progress(f"🚀 開始爬取 {category_name} 分類新聞 ({mode_text}模式)")
        
        # Fetch the list of article URLs
        category_url = self.categories[category]
        article_urls = self._extract_article_urls(category_url, max_pages=4)  # crawl up to 4 listing pages
        
        if not article_urls:
            self._notify_progress(f"⚠️ 未找到 {category_name} 分類的文章URL")
            return []
        
        total_articles = len(article_urls)
        
        if unlimited:
            # Fully unlimited mode: process every article found
            self._notify_progress(f"🎯 無限制模式:將處理所有 {total_articles} 篇文章")
            articles_to_process = article_urls
        else:
            # Limited mode: at most 20 articles
            max_limit = 20
            if total_articles > max_limit:
                articles_to_process = article_urls[:max_limit]
                self._notify_progress(f"⚠️ 限制模式:只處理前 {max_limit} 篇文章(共找到 {total_articles} 篇)")
            else:
                articles_to_process = article_urls
                self._notify_progress(f"📊 限制模式:將處理所有 {total_articles} 篇文章")
        
        # Extract content, then analyze and save each article immediately
        articles = []
        success_count = 0
        error_count = 0
        skip_count = 0
        
        for i, url in enumerate(articles_to_process, 1):
            try:
                self._notify_progress(f"📖 處理 {category_name} 文章 {i}/{len(articles_to_process)}: 正在提取內容...")
                article = self._extract_article_content(url, category)
                
                if article:
                    # Immediate sentiment analysis
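                    # Note: the save path below is nested inside this check, so without
                    # a sentiment_analyzer articles are returned but never persisted.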
                    if self.sentiment_analyzer:
                        self._notify_progress(f"🧠 分析 {category_name} 文章 {i}/{len(articles_to_process)}: {article.title[:30]}...")
                        sentiment_result = self.sentiment_analyzer.analyze_sentiment(
                            article.content, article.title
                        )
                        article.sentiment = sentiment_result['sentiment']
                        article.sentiment_score = sentiment_result['confidence']
                        
                        # Save immediately
                        if self.database:
                            # Duplicate check
                            if not self.database.check_duplicate_by_title(article.title):
                                db_article = {
                                    'title': article.title,
                                    'content': article.content,
                                    'url': article.url,
                                    'source': article.source,
                                    'category': article.category,
                                    'published_date': article.published_date.isoformat(),
                                    'sentiment': article.sentiment,
                                    'sentiment_score': article.sentiment_score,
                                    'sentiment_method': 'auto'
                                }
                                
                                inserted, _ = self.database.insert_news([db_article])
                                if inserted > 0:
                                    self._notify_progress(f"💾 已保存 {category_name} 文章: {article.title[:30]}... (情緒: {article.sentiment})")
                                    success_count += 1
                                else:
                                    self._notify_progress(f"⏭️ 跳過重複 {category_name} 文章: {article.title[:30]}...")
                                    skip_count += 1
                            else:
                                self._notify_progress(f"⏭️ 跳過重複 {category_name} 文章: {article.title[:30]}...")
                                skip_count += 1
                    
                    articles.append(article)
                else:
                    error_count += 1
                
                # Delay between articles (further shortened)
                if i < len(articles_to_process):
                    time.sleep(random.uniform(2, 6))
                
            except Exception as e:
                logger.error(f"處理文章時發生錯誤 {url}: {e}")
                self._notify_progress(f"❌ 處理 {category_name} 文章時發生錯誤: {str(e)[:50]}...")
                error_count += 1
                continue
        
        self._notify_progress(f"✅ {category_name} 分類爬取完成 - 處理: {len(articles_to_process)}, 成功: {success_count}, 跳過: {skip_count}, 錯誤: {error_count}")
        return articles
    
    def crawl_all_categories(self, unlimited: bool = True) -> Dict[str, List[NewsItem]]:
        """爬取所有分類的新聞 - 完全無限制版"""
        results = {}
        mode_text = "無限制" if unlimited else "限制"
        
        self._notify_progress(f"🚀 開始爬取所有分類 ({mode_text}模式)")
        
        for category in self.categories.keys():
            try:
                category_name = "美股" if category == "us_stock" else "台股"
                self._notify_progress(f"🎯 開始爬取 {category_name} 分類")
                
                # Pass the unlimited flag through
                articles = self.crawl_category(category, unlimited=unlimited)
                results[category] = articles
                
                # Rest between categories (shortened)
                if len(self.categories) > 1:
                    self._notify_progress(f"⏸️ 分類間休息...")
                    time.sleep(random.uniform(15, 30))
                
            except Exception as e:
                logger.error(f"爬取 {category} 分類時發生錯誤: {e}")
                self._notify_progress(f"❌ 爬取 {category} 分類時發生錯誤: {str(e)}")
                results[category] = []
        
        total_articles = sum(len(articles) for articles in results.values())
        self._notify_progress(f"🎉 所有分類爬取完成 ({mode_text}模式),總共處理 {total_articles} 篇文章")
        
        return results
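
# --- Minimal usage sketch (assumptions, not part of the original module:
# running as a script with no sentiment analyzer or database injected, so
# articles are collected in memory but never persisted; a real analyzer must
# expose analyze_sentiment(content, title) -> {'sentiment', 'confidence'} and
# a real database must expose check_duplicate_by_title() / insert_news(),
# matching the calls the crawler makes above) ---
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    crawler = CnYesNewsCrawler()          # no analyzer/database injected
    crawler.set_progress_callback(print)  # stream progress messages to stdout

    # unlimited=False caps each category at 20 articles; unlimited=True
    # processes every URL found on the first 4 listing pages.
    results = crawler.crawl_all_categories(unlimited=False)
    for category, items in results.items():
        print(f"{category}: {len(items)} articles")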