|
|
import re |
|
|
from typing import List, Dict, Optional, Set |
|
|
from bs4 import BeautifulSoup |
|
|
from forum_parser import InfostartForumParser |
|
|
import csv |
|
|
import os |
|
|
import logging |
|
|
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
class ThinkModelForumParser(InfostartForumParser):
    """Forum parser that builds a "think model" training dataset.

    Extends ``InfostartForumParser``: for every topic it captures the opening
    question (``prompt``), the full discussion thread wrapped in ``<think>``
    tags (``think_process``) and the marked solutions (``solution``), then
    appends the rows to a CSV file.
    """

    # Single source of truth for the CSV schema; previously this list was
    # duplicated in _init_csv and save_to_csv and could drift apart.
    CSV_FIELDS = ['source', 'in_source_id', 'prompt', 'think_process',
                  'solution', 'is_answer_a_link', 'has_link']

    # Compiled once; previously the identical pattern string was duplicated
    # in count_links_in_text and is_answer_mostly_link.
    # NOTE(review): inside this raw string '\\(' matches a literal backslash
    # or '(' and '$-_' is a character *range* — the pattern is kept
    # byte-identical on purpose so existing dataset semantics don't change.
    URL_RE = re.compile(
        r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    )

    def __init__(self, csv_file: str = 'think_model_dataset.csv', delay: float = 1.0):
        """Create the parser and ensure the output CSV exists.

        Args:
            csv_file: Path of the dataset CSV to append to.
            delay: Per-request delay, forwarded to the base parser.
        """
        super().__init__(csv_file, delay)
        self.csv_file = csv_file
        self._init_csv()

    def _init_csv(self):
        """Write the think-model CSV header once, if the file is missing."""
        if not os.path.exists(self.csv_file):
            with open(self.csv_file, 'w', newline='', encoding='utf-8') as file:
                writer = csv.writer(file, quoting=csv.QUOTE_ALL)
                writer.writerow(self.CSV_FIELDS)

    def extract_thread_conversation(self, soup: BeautifulSoup) -> str:
        """Collect the whole discussion thread as one ``<think>`` block.

        Each message node ('div.m-tree-p' → 'div.forum-message-text') is run
        through clean_message_text and clean_solution_text; non-empty results
        are joined with ``---`` separators inside the <think> tags.
        """
        conversation = []

        for msg in soup.find_all('div', class_='m-tree-p'):
            message_text_div = msg.find('div', class_='forum-message-text')
            if not message_text_div:
                continue
            text = self.clean_message_text(message_text_div)
            if not text:
                continue
            text = self.clean_solution_text(text)
            if text:
                conversation.append(text)

        return "<think>\n" + "\n---\n".join(conversation) + "\n</think>"

    def count_links_in_text(self, text: str) -> int:
        """Return the number of URLs matched in *text*."""
        return len(self.URL_RE.findall(text))

    def is_answer_mostly_link(self, text: str) -> bool:
        """Return True when matched URLs make up more than 80% of *text*."""
        links = self.URL_RE.findall(text)
        if not links:
            return False

        total_length = len(text.strip())
        links_length = sum(len(link) for link in links)

        return (links_length / total_length) > 0.8 if total_length > 0 else False

    async def parse_topic(self, session, topic_url: str) -> Optional[List[Dict]]:
        """Parse one topic page into a single think-model dataset row.

        Returns a one-element list with the row dict, or None when the URL
        is invalid, the page cannot be fetched, a required part (meta id or
        first message) is missing, or the topic was already processed.
        """
        topic_id = self.extract_topic_id(topic_url)
        if not topic_id:
            logger.debug(f"Невалидный URL: {topic_url}")
            return None

        logger.info(f"Парсинг темы: {topic_url}")

        html = await self.fetch_page(session, topic_url)
        if not html:
            return None

        soup = BeautifulSoup(html, 'html.parser')

        meta_id = self.extract_meta_identifier(soup)
        if not meta_id:
            logger.warning(f"Не найден meta identifier для {topic_url}")
            return None

        # Deduplicate on the meta identifier (not the raw URL), so the same
        # topic reached via different URLs is only stored once.
        if meta_id in self.processed_urls:
            logger.debug(f"Тема {meta_id} уже обработана")
            return None

        prompt = self.extract_first_message(soup)
        if not prompt:
            logger.warning(f"Не найден вопрос для {topic_url}")
            return None

        think_process = self.extract_thread_conversation(soup)
        solutions = self.extract_solutions(soup)
        combined_solution = "\n---\n".join(solutions) if solutions else ""

        link_count = self.count_links_in_text(combined_solution)
        is_answer_a_link = self.is_answer_mostly_link(combined_solution)

        self.processed_urls.add(meta_id)

        return [{
            'source': 'forum_infostart',
            'in_source_id': meta_id,
            'prompt': prompt,
            'think_process': think_process,
            'solution': combined_solution,
            'is_answer_a_link': is_answer_a_link,
            # Dataset convention: the link count, or the string 'NaN' when
            # the solution contains no links at all.
            'has_link': link_count if link_count > 0 else 'NaN',
        }]

    def save_to_csv(self, data: List[Dict]):
        """Append rows to the dataset CSV, escaping every string value."""
        if not data:
            return

        with open(self.csv_file, 'a', newline='', encoding='utf-8') as file:
            writer = csv.DictWriter(file, fieldnames=self.CSV_FIELDS,
                                    quoting=csv.QUOTE_ALL)
            for row in data:
                escaped_row = {
                    key: self.escape_for_csv(value) if isinstance(value, str) else value
                    for key, value in row.items()
                }
                writer.writerow(escaped_row)

        logger.info(f"Сохранено {len(data)} записей в {self.csv_file}")
|
|
|
|
|
async def main():
    """Script entry point: crawl forum pages 1–2100 into the dataset CSV."""
    think_parser = ThinkModelForumParser()
    await think_parser.parse_all_pages(start_page=1, end_page=2100)
|
|
|
|
|
if __name__ == "__main__":
    # Run the async crawler only when executed as a script, not on import.
    import asyncio

    asyncio.run(main())