File size: 4,572 Bytes
39c419c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import copy
import logging
import re
from typing import Dict, List, Tuple

import requests
from bs4 import BeautifulSoup

# Configure root logging once for the whole script; all messages go to stderr
# with a timestamp and level prefix.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

def parse_article(html_content: str) -> Tuple[str, List[Dict]]:
    """Parse a PTT article page into its body text and its push (comment) list.

    Args:
        html_content: Raw HTML of a single PTT article page.

    Returns:
        A tuple ``(text, push_list)``. ``text`` is the article body with the
        metadata lines, pushes, and URLs stripped out. ``push_list`` is a list
        of dicts with keys ``tag``, ``user_id``, ``content`` and ``time``.
        Returns ``("", [])`` when the page has no ``#main-content`` div.
    """
    soup = BeautifulSoup(html_content, "html.parser")
    main_content_div = soup.find("div", id="main-content")
    if not main_content_div:
        return "", []
    # Strip metadata/push tags from a shallow copy so the pushes can still be
    # located in the original tree below.  copy.copy() is the idiomatic way to
    # invoke the tag's __copy__ hook.
    main_content_copy = copy.copy(main_content_div)
    for tag in main_content_copy.find_all(
        ["div", "span"],
        class_=["article-metaline", "article-metaline-right", "f2", "push"],
    ):
        tag.decompose()
    text = main_content_copy.get_text(strip=True, separator="\n")
    # Remove URLs from the body text.
    text = re.sub(r'https?://\S+', '', text)
    push_list = []
    for push in soup.find_all("div", class_="push"):
        try:
            push_list.append({
                "tag": push.find("span", class_="push-tag").get_text(strip=True),
                "user_id": push.find("span", class_="push-userid").get_text(strip=True),
                "content": push.find("span", class_="push-content").get_text(strip=True).lstrip(": "),
                "time": push.find("span", class_="push-ipdatetime").get_text(strip=True),
            })
        except AttributeError as e:
            # A malformed push is missing one of its spans (find() returned
            # None); skip it.  Lazy %-formatting keeps the logging call cheap.
            logger.warning("解析推文時發生錯誤: %s", e)
            continue
    return text, push_list

def main():
    """Crawl several PTT board index pages, download every listed article in
    parallel, and append each article's body text to a text file."""
    import time
    # `import urllib` alone does not reliably expose the `parse` submodule;
    # import it explicitly.
    import urllib.parse
    from multiprocessing import Pool
    from requests_html import HTMLSession

    def parse_article_entries(elements):
        """Turn the '.r-ent' listing elements of an index page into metadata dicts."""
        results = []
        for element in elements:
            # Pre-bind every field: a deleted post has no '.title > a', so the
            # try block raises AttributeError mid-way and would otherwise leave
            # `link` (and possibly other names) unbound, crashing the append
            # below with a NameError.
            push = mark = title = author = date = link = ''
            try:
                push = element.find('.nrec', first=True).text
                mark = element.find('.mark', first=True).text
                title = element.find('.title', first=True).text
                author = element.find('.meta > .author', first=True).text
                date = element.find('.meta > .date', first=True).text
                link = element.find('.title > a', first=True).attrs['href']

                # Keep only canonical PTT article URLs.
                if not re.match(r"^/bbs/\w+/M\.\d+\.A\.\w+\.html$", link):
                    continue
            except AttributeError:
                # Deleted posts keep the author only inside the title text;
                # try to recover it.  Raw strings avoid invalid-escape warnings.
                if '(本文已被刪除)' in title:
                    match_author = re.search(r'\[(\w*)\]', title)
                    if match_author:
                        author = match_author.group(1)
                elif re.search(r'已被\w*刪除', title):
                    match_author = re.search(r'\<(\w*)\>', title)
                    if match_author:
                        author = match_author.group(1)
            results.append({
                'push': push,
                'mark': mark,
                'title': title,
                'author': author,
                'date': date,
                'link': link,
            })
        return results

    def parse_next_link(controls):
        """Return the absolute URL behind the second action-bar button
        (the '‹ 上頁' previous-page link on PTT index pages)."""
        link = controls[1].attrs['href']
        return urllib.parse.urljoin('https://www.ptt.cc/', link)

    def get_posts(post_links):
        """Fetch all article pages in parallel worker processes."""
        with Pool(processes=8) as pool:
            return pool.map(session.get, post_links)

    session = HTMLSession()
    session.cookies.set('over18', '1')  # bypass the board's age-confirmation gate
    url = 'https://www.ptt.cc/bbs/LoL/index.html'

    num_page = 10
    post_links = []
    for _ in range(num_page):
        response = session.get(url)
        metadata = parse_article_entries(elements=response.html.find('div.r-ent'))
        next_page_url = parse_next_link(controls=response.html.find('.action-bar a.btn.wide'))
        # Deleted posts carry an empty link; skip them instead of fetching junk.
        post_links += [
            urllib.parse.urljoin(url, meta['link'])
            for meta in metadata
            if meta['link']
        ]
        url = next_page_url

    start_time = time.time()
    results = get_posts(post_links)
    print(f'共 {len(results)} 篇文章,花費: {time.time() - start_time:.2f} 秒\n')

    txt_file = "ptt.txt"
    # Open the output file once instead of re-opening it for every article.
    with open(txt_file, "a", encoding="utf-8") as f:
        for res in results:
            main_text, _ = parse_article(res.text)
            f.write(main_text + "\n\n")
    print(f"文章已寫入 {txt_file}")

# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()