import requests
from bs4 import BeautifulSoup
import json


def crawl_scp_series(url, num):
    # Fetch the series index page. num is supplied by the caller but currently unused.
    response = requests.get(url)
    response.raise_for_status()  # fail loudly on HTTP errors instead of parsing an error page
    soup = BeautifulSoup(response.content, 'html.parser')

    # The list of article links lives inside the page-content div.
    content_div = soup.find('div', id='page-content')

    # Collect every non-empty link on the index page.
    links = [a['href'] for a in content_div.find_all('a', href=True) if a.text]
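
    # Accumulate cleaned article texts; they are flushed to disk in batches below.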
    stories = []

    for link in links:
        # Wikidot links are usually site-relative; make them absolute.
        if not link.startswith('http'):
            link = f"https://scp-wiki.wikidot.com{link}"
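
        # Fetch each article page and strip navigation/license chrome before saving.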
        try:
            story_response = requests.get(link)
            story_response.raise_for_status()
            story_soup = BeautifulSoup(story_response.content, 'html.parser')

            page_content = story_soup.find('div', id='page-content')
            if page_content is None:
                print(f"Could not find page-content div for {link}")
                continue
            print(f"Found page-content div for {link}")

            # Remove the right-aligned rating module at the top of the article.
            first_div = page_content.find('div', style="text-align: right;")
            if first_div:
                first_div.decompose()

            # Remove the license box appended to the bottom of the article.
            licensebox_div = page_content.find('div', class_='licensebox')
            if licensebox_div:
                licensebox_div.decompose()

            story_text = page_content.get_text().strip()
            stories.append(story_text)
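
            # Flush to disk every 10 stories so a crash doesn't lose the whole run.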
            if len(stories) == 10:
                with open("scp_jokes.jsonl", 'a') as file:
                    for story in stories:
                        json_record = json.dumps({'text': story})
                        file.write(json_record + '\n')
                stories = []
        except requests.exceptions.RequestException as e:
            print(f"Error fetching {link}: {e}")

    # Write out any leftover stories that didn't fill a final batch of 10.
    if stories:
        with open("scp_jokes.jsonl", 'a') as file:
            for story in stories:
                file.write(json.dumps({'text': story}) + '\n')
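

# Crawl each series index page in turn.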
urls = ['https://scp-wiki.wikidot.com/joke-scps']

for num, url in enumerate(urls, start=1):
    crawl_scp_series(url, num)
    print(url)