hibana2077
Replace the old train.parquet with a larger file; enhance convert.py to drop duplicates and reset the index; add a PTT article crawler script
39c419c
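The commit message says convert.py now drops duplicate rows and resets the index before writing train.parquet. convert.py itself is not included in this excerpt; a minimal sketch of that step, assuming the data is loaded into a pandas DataFrame and written back to train.parquet, might look like:

    import pandas as pd

    # Load the dataset, drop exact duplicate rows, and renumber the index from 0
    df = pd.read_parquet("train.parquet")
    df = df.drop_duplicates().reset_index(drop=True)
    df.to_parquet("train.parquet", index=False)

The rest of the commit shown below is the new PTT article crawler script.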
from bs4 import BeautifulSoup
import requests
import re
import copy
import logging
from typing import Dict, List, Tuple

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


def parse_article(html_content: str) -> Tuple[str, List[Dict]]:
    """Parse a PTT article page into its main text and a list of pushes (comments)."""
    soup = BeautifulSoup(html_content, "html.parser")
    main_content_div = soup.find("div", id="main-content")
    if not main_content_div:
        return "", []
    # Work on a copy so removing metadata and pushes does not mutate the original tree
    main_content_copy = copy.copy(main_content_div)
    for tag in main_content_copy.find_all(["div", "span"], class_=["article-metaline", "article-metaline-right", "f2", "push"]):
        tag.decompose()
    # Remove URLs from the article body
    text = main_content_copy.get_text(strip=True, separator="\n")
    text = re.sub(r'https?://\S+', '', text)
    pushes = soup.find_all("div", class_="push")
    push_list = []
    for push in pushes:
        try:
            tag = push.find("span", class_="push-tag").get_text(strip=True)
            user_id = push.find("span", class_="push-userid").get_text(strip=True)
            content = push.find("span", class_="push-content").get_text(strip=True).lstrip(": ")
            time = push.find("span", class_="push-ipdatetime").get_text(strip=True)
            push_list.append({
                "tag": tag,
                "user_id": user_id,
                "content": content,
                "time": time
            })
        except AttributeError as e:
            logger.warning(f"Failed to parse a push: {e}")
            continue
    return text, push_list


def main():
    """Fetch articles from multiple PTT index pages and write their main text to a file."""
    import time
    import urllib.parse
    from multiprocessing import Pool
    from requests_html import HTMLSession

    def parse_article_entries(elements):
        """Extract metadata for each article entry on an index page."""
        results = []
        for element in elements:
            try:
                push = element.find('.nrec', first=True).text
                mark = element.find('.mark', first=True).text
                title = element.find('.title', first=True).text
                author = element.find('.meta > .author', first=True).text
                date = element.find('.meta > .date', first=True).text
                link = element.find('.title > a', first=True).attrs['href']
                # Keep only links that match the canonical PTT article URL pattern
                if not re.match(r"^/bbs/\w+/M\.\d+\.A\.\w+\.html$", link):
                    continue
            except AttributeError:
                # Deleted articles have no link; try to recover the author from the title, then skip
                if '(本文已被刪除)' in title:
                    match_author = re.search(r'\[(\w*)\]', title)
                    if match_author:
                        author = match_author.group(1)
                elif re.search(r'已被\w*刪除', title):
                    match_author = re.search(r'<(\w*)>', title)
                    if match_author:
                        author = match_author.group(1)
                continue
            results.append({
                'push': push,
                'mark': mark,
                'title': title,
                'author': author,
                'date': date,
                'link': link
            })
        return results

    def parse_next_link(controls):
        # The wide action-bar buttons are [oldest, prev page, next page, newest];
        # index 1 ("‹ 上頁") points to the previous (older) index page
        link = controls[1].attrs['href']
        return urllib.parse.urljoin('https://www.ptt.cc/', link)

    def get_posts(post_links):
        # Fetch all article pages in parallel with a process pool
        with Pool(processes=8) as pool:
            responses = pool.map(session.get, post_links)
        return responses

    session = HTMLSession()
    # PTT requires the over18 cookie to access age-gated boards
    session.cookies.set('over18', '1')

    url = 'https://www.ptt.cc/bbs/LoL/index.html'
    num_page = 10
    post_links = []

    # Walk backwards through num_page index pages, collecting article links
    for _ in range(num_page):
        response = session.get(url)
        metadata = parse_article_entries(elements=response.html.find('div.r-ent'))
        next_page_url = parse_next_link(controls=response.html.find('.action-bar a.btn.wide'))
        post_links += [urllib.parse.urljoin(url, meta['link']) for meta in metadata]
        url = next_page_url

    start_time = time.time()
    results = get_posts(post_links)
    print(f'Fetched {len(results)} articles in {time.time() - start_time:.2f} seconds\n')
    txt_file = "ptt.txt"
    # Append each article's cleaned main text to the output file
    with open(txt_file, "a", encoding="utf-8") as f:
        for i, res in enumerate(results):
            main_text, _ = parse_article(res.text)
            # print(f'--- Article {i + 1} ---')
            # print(main_text)
            # print('\n')
            f.write(main_text + "\n\n")
    print(f"Articles written to {txt_file}")


if __name__ == "__main__":
    main()
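
The script writes only the article bodies and discards the push list that parse_article also returns. A minimal sketch of how that second return value could be consumed, assuming a response object res obtained from the same session (illustrative only, not part of the commit):

    # Each push is a dict with keys "tag", "user_id", "content", "time"
    text, pushes = parse_article(res.text)
    for p in pushes:
        print(p["tag"], p["user_id"], p["content"], p["time"])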