hibana2077 committed
Commit · 39c419c
Parent(s): 0e99d37
Remove the old train.parquet and add a new, larger one; update convert.py to drop duplicates and reset the index; add a PTT article crawler script
Browse files
- crawler/ptt/main.py +125 -0
- crawler/ptt/ptt.txt +0 -0
- data/train.parquet +2 -2
- prepare/convert.py +3 -1
- prepare/ptt.txt +0 -0
- prepare/threads.txt +2 -1
- prepare/train.parquet +0 -3
crawler/ptt/main.py
ADDED
@@ -0,0 +1,125 @@
from bs4 import BeautifulSoup
import requests
import re
import copy
import logging
from typing import Dict, List, Tuple

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


def parse_article(html_content: str) -> Tuple[str, List[Dict]]:
    """Split a PTT article page into its body text and its push (comment) list."""
    soup = BeautifulSoup(html_content, "html.parser")
    main_content_div = soup.find("div", id="main-content")
    if not main_content_div:
        return "", []
    # Work on a copy so the push spans stay intact in the original soup
    main_content_copy = copy.copy(main_content_div)
    for tag in main_content_copy.find_all(["div", "span"], class_=["article-metaline", "article-metaline-right", "f2", "push"]):
        tag.decompose()
    text = main_content_copy.get_text(strip=True, separator="\n")
    # Remove URLs
    text = re.sub(r'https?://\S+', '', text)
    pushes = soup.find_all("div", class_="push")
    push_list = []
    for push in pushes:
        try:
            tag = push.find("span", class_="push-tag").get_text(strip=True)
            user_id = push.find("span", class_="push-userid").get_text(strip=True)
            content = push.find("span", class_="push-content").get_text(strip=True).lstrip(": ")
            time = push.find("span", class_="push-ipdatetime").get_text(strip=True)
            push_list.append({
                "tag": tag,
                "user_id": user_id,
                "content": content,
                "time": time
            })
        except AttributeError as e:
            logger.warning(f"Failed to parse a push comment: {e}")
            continue
    return text, push_list


def main():
    """Read multiple articles from the board index pages and write their bodies to a file."""
    import re
    import time
    import urllib.parse
    from multiprocessing import Pool
    from requests_html import HTMLSession

    def parse_article_entries(elements):
        results = []
        for element in elements:
            try:
                push = element.find('.nrec', first=True).text
                mark = element.find('.mark', first=True).text
                title = element.find('.title', first=True).text
                author = element.find('.meta > .author', first=True).text
                date = element.find('.meta > .date', first=True).text
                link = element.find('.title > a', first=True).attrs['href']

                # Keep only links matching the canonical PTT article URL pattern
                if not re.match(r"^/bbs/\w+/M\.\d+\.A\.\w+\.html$", link):
                    continue
            except AttributeError:
                # Deleted articles have no <a> tag; recover the author from the title
                if '(本文已被刪除)' in title:
                    match_author = re.search(r'\[(\w*)\]', title)
                    if match_author:
                        author = match_author.group(1)
                elif re.search(r'已被\w*刪除', title):
                    match_author = re.search(r'\<(\w*)\>', title)
                    if match_author:
                        author = match_author.group(1)
                continue  # deleted entries have no article page; skip to avoid a stale link
            results.append({
                'push': push,
                'mark': mark,
                'title': title,
                'author': author,
                'date': date,
                'link': link
            })
        return results

    def parse_next_link(controls):
        # controls[1] is the "‹ 上頁" (older page) button in the action bar
        link = controls[1].attrs['href']
        return urllib.parse.urljoin('https://www.ptt.cc/', link)

    def get_posts(post_links):
        with Pool(processes=8) as pool:
            responses = pool.map(session.get, post_links)
        return responses

    session = HTMLSession()
    session.cookies.set('over18', '1')  # pass the age-verification gate
    url = 'https://www.ptt.cc/bbs/LoL/index.html'

    num_page = 10
    post_links = []
    for _ in range(num_page):
        response = session.get(url)
        metadata = parse_article_entries(elements=response.html.find('div.r-ent'))
        next_page_url = parse_next_link(controls=response.html.find('.action-bar a.btn.wide'))
        post_links += [urllib.parse.urljoin(url, meta['link']) for meta in metadata]
        url = next_page_url

    start_time = time.time()
    results = get_posts(post_links)
    print(f'Fetched {len(results)} articles in {time.time() - start_time:.2f} s\n')

    txt_file = "ptt.txt"
    for res in results:
        main_text, _ = parse_article(res.text)
        with open(txt_file, "a", encoding="utf-8") as f:
            f.write(main_text + "\n\n")
    print(f"Articles written to {txt_file}")


if __name__ == "__main__":
    main()
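For a quick sanity check, parse_article can also be exercised on a single article page outside the crawl loop. A minimal sketch, assuming the script above is importable as main (run from crawler/ptt/) and using a placeholder article URL:

import requests
from main import parse_article  # assumes crawler/ptt/main.py is on the import path

# Hypothetical article URL; substitute any real PTT article page
url = "https://www.ptt.cc/bbs/LoL/M.0000000000.A.000.html"
resp = requests.get(url, cookies={"over18": "1"})  # over18 cookie skips the age gate
text, pushes = parse_article(resp.text)
print(text[:200])
print(f"{len(pushes)} push comments parsed")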
crawler/ptt/ptt.txt
ADDED
The diff for this file is too large to render. See raw diff.
data/train.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:61b17507086708ca1fce53157fdd6fa9bc36049750b1e776adf6cdc134bab144
+size 636201
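The updated pointer tracks a ~636 KB Git LFS object. A hedged sketch of consuming it, assuming the repository is cloned with git-lfs so the pointer resolves to the real parquet, and that it carries the text/origin columns built by prepare/convert.py:

import pandas as pd

# Load the regenerated training split
df = pd.read_parquet("data/train.parquet")
print(len(df), df.columns.tolist())  # expected columns: ['text', 'origin']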
prepare/convert.py
CHANGED
@@ -49,7 +49,9 @@ def main():
 
     # Create DataFrame
     df = pd.DataFrame(all_data, columns=['text', 'origin'])
-
+    df.drop_duplicates(subset=['text'], inplace=True)
+    df.reset_index(drop=True, inplace=True)
+    print(f"Total unique lines: {len(df)}")
     # Save to CSV
     # csv_path = "train.csv"
     # df.to_csv(csv_path, index=False)
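The three added lines drop rows whose text field repeats and renumber the index so the saved parquet has contiguous row ids. A minimal sketch of that behavior on toy data (the sample strings are invented):

import pandas as pd

df = pd.DataFrame(
    [["hello", "ptt"], ["hello", "threads"], ["world", "ptt"]],
    columns=["text", "origin"],
)
df.drop_duplicates(subset=["text"], inplace=True)  # keeps the first "hello" row
df.reset_index(drop=True, inplace=True)            # index becomes 0, 1
print(f"Total unique lines: {len(df)}")            # -> Total unique lines: 2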
prepare/ptt.txt
CHANGED
The diff for this file is too large to render. See raw diff.
prepare/threads.txt
CHANGED
@@ -548,4 +548,5 @@ Academia Sinica 中央研究院 電波生物教師 LLM實習工程師
 不願成為麻木的人,所以盡力去共情。我堅持的正義是我不變成施暴者,也不變成烏合之眾聽他人讒言。
 我要保持我的判斷,我的野心是我的思想,和他人毫無關係。
 朋友擔心我到泰國不安全 讓我每天狂發日常給她看 順帶也發一下這裡
-朋友說有一種我屬於富士山的美。
+朋友說有一種我屬於富士山的美。
+記得下次實習生報到要先問用不用機械鍵盤,用的話就給警告不能拿來公司用
prepare/train.parquet
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f72440b5e35b6ba5178ac61b8d742759843cb226a25377fd93338a91357dabbe
-size 102537