#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Basic Income Party (기본소득당) crawler - high-performance async version with automatic
Hugging Face upload.

- Gnuboard 5 based site (basicincomeparty.kr)
- Markup structure: td.td_subject / td.td_datetime (YY.MM.DD.) / div#bo_v_con
"""

import os
import json
import re
import asyncio
from datetime import datetime, timedelta
from typing import List, Dict, Optional
import pandas as pd
from tqdm.asyncio import tqdm as async_tqdm
import aiohttp
from bs4 import BeautifulSoup
from dotenv import load_dotenv
from huggingface_hub import login
from datasets import Dataset, load_dataset

load_dotenv()
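
# Environment (a sketch): HF_TOKEN and HF_REPO_ID_BASIC_INCOME are read in __init__ below and
# can be supplied via a .env file next to this script, for example:
#   HF_TOKEN=hf_xxxxxxxxxxxxxxxx        # Hugging Face write token (upload is skipped without it)
#   HF_REPO_ID_BASIC_INCOME=<user>/basic-income-press-releases   # illustrative repo id
# Running this file directly performs an incremental crawl (see run_incremental / main below).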


class BasicIncomeAsyncCrawler:
    def __init__(self, config_path="crawler_config.json"):
        self.base_url = "https://basicincomeparty.kr"
        self.party_name = "기본소득당"  # Basic Income Party; used in output file names
        self.config_path = config_path
        self.state_path = "crawler_state.json"

        self.load_config()

        self.hf_token = os.getenv("HF_TOKEN")
        self.hf_repo_id = os.getenv("HF_REPO_ID_BASIC_INCOME", "basic-income-press-releases")

        # Cap concurrent requests at the configured level (default 10).
        self.semaphore = asyncio.Semaphore(self.config.get("concurrent_requests", 10))

    def load_config(self):
        default_config = {
            "boards": {
                "논평보도자료": "bikr/press"  # commentary / press-release board
            },
            "start_date": "2020-01-08",
            "max_pages": 10000,
            "concurrent_requests": 10,
            "request_delay": 0.3,
            "output_path": "./data"
        }

        if os.path.exists(self.config_path):
            with open(self.config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)
                self.config = config.get('basic_income', default_config)
        else:
            self.config = default_config

        self.boards = self.config["boards"]
        self.start_date = self.config["start_date"]
        self.max_pages = self.config["max_pages"]
        self.output_path = self.config["output_path"]
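
    # Example crawler_config.json (a sketch; the keys mirror default_config above and are
    # read from the top-level "basic_income" key in load_config):
    # {
    #   "basic_income": {
    #     "boards": {"논평보도자료": "bikr/press"},
    #     "start_date": "2020-01-08",
    #     "max_pages": 10000,
    #     "concurrent_requests": 10,
    #     "request_delay": 0.3,
    #     "output_path": "./data"
    #   }
    # }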

    def load_state(self) -> Dict:
        if os.path.exists(self.state_path):
            with open(self.state_path, 'r', encoding='utf-8') as f:
                state = json.load(f)
                return state.get('basic_income', {})
        return {}

    def save_state(self, state: Dict):
        all_state = {}
        if os.path.exists(self.state_path):
            with open(self.state_path, 'r', encoding='utf-8') as f:
                all_state = json.load(f)
        all_state['basic_income'] = state
        with open(self.state_path, 'w', encoding='utf-8') as f:
            json.dump(all_state, f, ensure_ascii=False, indent=2)
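
    # Example crawler_state.json written by save_state and read by load_state (a sketch;
    # the field names follow run_incremental below, the values are illustrative):
    # {
    #   "basic_income": {
    #     "last_crawl_date": "2025-01-15",
    #     "last_crawl_time": "2025-01-15T09:30:00",
    #     "last_count": 42
    #   }
    # }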

    @staticmethod
    def parse_date(date_str: str) -> Optional[datetime]:
        """Parse YY.MM.DD., YYYY.MM.DD., or YYYY-MM-DD date strings."""
        date_str = date_str.strip().rstrip('.')
        try:
            parts = date_str.split('.')
            if len(parts) >= 3:
                year = int(parts[0])
                year = 2000 + year if year < 100 else year
                return datetime(year, int(parts[1]), int(parts[2]))
        except (ValueError, IndexError):
            pass
        try:
            return datetime.strptime(date_str[:10], '%Y-%m-%d')
        except ValueError:
            return None

    @staticmethod
    def clean_text(text: str) -> str:
        # Strip non-breaking spaces and zero-width spaces left over from the CMS editor.
        text = text.replace('\xa0', '').replace('\u200b', '')
        return text.strip()

    async def fetch_with_retry(self, session: aiohttp.ClientSession, url: str,
                               max_retries: int = 3) -> Optional[str]:
        async with self.semaphore:
            for attempt in range(max_retries):
                try:
                    await asyncio.sleep(self.config.get("request_delay", 0.3))
                    async with session.get(url, timeout=aiohttp.ClientTimeout(total=15)) as response:
                        if response.status == 200:
                            return await response.text()
                except Exception:
                    if attempt < max_retries - 1:
                        await asyncio.sleep(1)
                    else:
                        return None
        return None

    async def fetch_list_page(self, session: aiohttp.ClientSession,
                              board_name: str, board_path: str, page_num: int,
                              start_date: datetime, end_date: datetime) -> tuple:
        url = f"{self.base_url}/{board_path}?page={page_num}"

        html = await self.fetch_with_retry(session, url)
        if not html:
            return [], False

        soup = BeautifulSoup(html, 'html.parser')
        rows = soup.select('table tbody tr')
        if not rows:
            return [], True

        data = []
        stop_flag = False

        for row in rows:
            try:
                # Title and URL: td.td_subject div.bo_tit a
                title_a = row.select_one('td.td_subject div.bo_tit a')
                if not title_a:
                    continue

                title = title_a.get_text(strip=True)
                href = title_a.get('href', '')
                # Strip the query string (e.g. ?page=...) and build an absolute URL
                article_url = re.sub(r'\?.*$', '', href)
                if not article_url.startswith('http'):
                    article_url = self.base_url + article_url

                # Date: td.td_datetime (YY.MM.DD. format)
                date_td = row.select_one('td.td_datetime')
                if not date_td:
                    continue
                date_str = date_td.get_text(strip=True)

                # Category: td.td_num2 a.bo_cate_link
                cate_a = row.select_one('td.td_num2 a.bo_cate_link')
                category = cate_a.get_text(strip=True) if cate_a else ""

                article_date = self.parse_date(date_str)
                if not article_date:
                    continue
                if article_date < start_date:
                    stop_flag = True
                    break
                if article_date > end_date:
                    continue

                data.append({
                    'board_name': board_name,
                    'title': title,
                    'category': category,
                    'date': article_date.strftime('%Y-%m-%d'),  # normalized to YYYY-MM-DD
                    'url': article_url
                })
            except Exception:  # skip malformed rows
                continue

        return data, stop_flag

    async def fetch_article_detail(self, session: aiohttp.ClientSession, url: str) -> Dict:
        html = await self.fetch_with_retry(session, url)
        if not html:
            return {'text': "본문 조회 실패", 'writer': ""}  # placeholder text: "failed to fetch body"

        soup = BeautifulSoup(html, 'html.parser')
        text_parts = []
        writer = ""

        # Body text: div#bo_v_con
        contents_div = soup.find('div', id='bo_v_con')
        if contents_div:
            for p in contents_div.find_all('p'):
                cleaned = self.clean_text(p.get_text(strip=True))
                if cleaned:
                    text_parts.append(cleaned)

        # Writer: span.sv_member inside section#bo_v_info div.profile_info_ct
        info_div = soup.select_one('section#bo_v_info div.profile_info_ct')
        if info_div:
            writer_el = info_div.find('span', class_='sv_member')
            if writer_el:
                writer = writer_el.get_text(strip=True)

        return {'text': '\n'.join(text_parts), 'writer': writer}

    async def collect_board(self, board_name: str, board_path: str,
                            start_date: str, end_date: str) -> List[Dict]:
        start_dt = datetime.strptime(start_date, '%Y-%m-%d')
        end_dt = datetime.strptime(end_date, '%Y-%m-%d')

        print(f"\n▶ [{board_name}] Collecting list pages...")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'ko-KR,ko;q=0.9',
        }

        async with aiohttp.ClientSession(headers=headers) as session:
            all_items = []
            page_num = 1
            empty_pages = 0
            max_empty_pages = 3

            with async_tqdm(desc=f"[{board_name}] list", unit="page") as pbar:
                while page_num <= self.max_pages:
                    items, stop_flag = await self.fetch_list_page(
                        session, board_name, board_path, page_num, start_dt, end_dt
                    )

                    if not items:
                        empty_pages += 1
                        if empty_pages >= max_empty_pages or stop_flag:
                            break
                    else:
                        empty_pages = 0
                        all_items.extend(items)

                    pbar.update(1)
                    pbar.set_postfix({"collected": len(all_items)})

                    if stop_flag:
                        break

                    page_num += 1

            print(f"  ✓ {len(all_items)} items found")

            if all_items:
                print("  ▶ Fetching detail pages...")
                tasks = [self.fetch_article_detail(session, item['url']) for item in all_items]

                # asyncio.as_completed would yield results in completion order and mis-align
                # them with all_items; tqdm's gather preserves the input order.
                details = await async_tqdm.gather(*tasks, desc=f"[{board_name}] details")

                for item, detail in zip(all_items, details):
                    item.update(detail)

        print(f"✓ [{board_name}] done: {len(all_items)} items")
        return all_items

    async def collect_all(self, start_date: Optional[str] = None,
                          end_date: Optional[str] = None) -> pd.DataFrame:
        if not end_date:
            end_date = datetime.now().strftime('%Y-%m-%d')
        if not start_date:
            start_date = self.start_date

        print(f"\n{'='*60}")
        print("Basic Income Party press-release crawl - high-performance async version")
        print(f"Period: {start_date} ~ {end_date}")
        print(f"{'='*60}")

        tasks = [
            self.collect_board(board_name, board_path, start_date, end_date)
            for board_name, board_path in self.boards.items()
        ]
        results = await asyncio.gather(*tasks)

        all_data = []
        for items in results:
            all_data.extend(items)

        if not all_data:
            print("\n⚠️ No data collected")
            return pd.DataFrame()

        df = pd.DataFrame(all_data)
        df = df[['board_name', 'title', 'category', 'date', 'writer', 'text', 'url']]
        # Reset the index so the filter leaves no gaps (and no stray index column is
        # added when converting to a Hugging Face Dataset).
        df = df[(df['title'] != "") & (df['text'] != "")].reset_index(drop=True)
        df['date'] = pd.to_datetime(df['date'], errors='coerce')

        print(f"\n✓ Collected {len(df)} articles in total")
        return df

    def save_local(self, df: pd.DataFrame):
        os.makedirs(self.output_path, exist_ok=True)
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        csv_path = os.path.join(self.output_path, f"{self.party_name}_{timestamp}.csv")
        xlsx_path = os.path.join(self.output_path, f"{self.party_name}_{timestamp}.xlsx")
        df.to_csv(csv_path, index=False, encoding='utf-8-sig')
        df.to_excel(xlsx_path, index=False, engine='openpyxl')
        print(f"✓ CSV: {csv_path}")
        print(f"✓ Excel: {xlsx_path}")

    def upload_to_huggingface(self, df: pd.DataFrame):
        if not self.hf_token:
            print("\n⚠️ HF_TOKEN is not set; skipping Hugging Face upload.")
            return

        print(f"\n▶ Uploading to Hugging Face... (repo: {self.hf_repo_id})")
        try:
            login(token=self.hf_token)
            new_dataset = Dataset.from_pandas(df)
            try:
                existing_dataset = load_dataset(self.hf_repo_id, split='train')
                existing_df = existing_dataset.to_pandas()
                combined_df = pd.concat([existing_df, df], ignore_index=True)
                combined_df = combined_df.drop_duplicates(subset=['url'], keep='last')
                combined_df = combined_df.sort_values('date', ascending=False).reset_index(drop=True)
                final_dataset = Dataset.from_pandas(combined_df)
                print(f"  ✓ After merge: {len(final_dataset)} rows")
            except Exception:
                final_dataset = new_dataset
                print("  ℹ️ Creating a new dataset")
            final_dataset.push_to_hub(self.hf_repo_id, token=self.hf_token)
            print("✓ Hugging Face upload complete!")
        except Exception as e:
            print(f"✗ Upload failed: {e}")

    async def run_incremental(self):
        state = self.load_state()
        last_date = state.get('last_crawl_date')

        if last_date:
            start_date = (datetime.strptime(last_date, '%Y-%m-%d') + timedelta(days=1)).strftime('%Y-%m-%d')
            print(f"📅 Incremental update: collecting data from {start_date} onward")
        else:
            start_date = self.start_date
            print(f"📅 Full crawl: starting from {start_date}")

        end_date = datetime.now().strftime('%Y-%m-%d')
        df = await self.collect_all(start_date, end_date)

        if df.empty:
            print("✓ No new data")
            return

        self.save_local(df)
        self.upload_to_huggingface(df)

        state['last_crawl_date'] = end_date
        state['last_crawl_time'] = datetime.now().isoformat()
        state['last_count'] = len(df)
        self.save_state(state)

        print(f"\n{'='*60}\n✓ Done!\n{'='*60}\n")

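# A minimal programmatic sketch (not part of the original flow): run a one-off crawl for a
# fixed, illustrative date window instead of the incremental update performed by main(),
# e.g. asyncio.run(full_crawl_example()).
async def full_crawl_example(start: str = "2020-01-08", end: str = "2024-12-31") -> pd.DataFrame:
    crawler = BasicIncomeAsyncCrawler()
    df = await crawler.collect_all(start, end)
    if not df.empty:
        crawler.save_local(df)
    return df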

async def main():
    crawler = BasicIncomeAsyncCrawler()
    await crawler.run_incremental()


if __name__ == "__main__":
    asyncio.run(main())