|
|
import os
import sqlite3
import threading
from queue import Empty, Queue

import requests
from bs4 import BeautifulSoup
|
|
|
|
|
|
|
|
|
BASE_URL = "https://en.wikipedia.org"
|
|
|
START_URL = f"{BASE_URL}/wiki/Main_Page"
|
|
|
ARCHIVE_FOLDER = "archive"
|
|
|
DB_FILE = "visited_links.db"
|
|
|
THREAD_COUNT = 45
|
|
|
|
|
|
|
|
|
os.makedirs(ARCHIVE_FOLDER, exist_ok=True)
|
|
|
|
|
|
|
|
|
conn = sqlite3.connect(DB_FILE, check_same_thread=False)
|
|
|
cur = conn.cursor()
|
|
|
cur.execute("""
|
|
|
CREATE TABLE IF NOT EXISTS visited_links (
|
|
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
|
url TEXT UNIQUE
|
|
|
)
|
|
|
""")
|
|
|
|
|
|
|
|
|
cur.execute("PRAGMA table_info(visited_links)")
|
|
|
columns = [row[1] for row in cur.fetchall()]
|
|
|
if "explored" not in columns:
|
|
|
cur.execute("ALTER TABLE visited_links ADD COLUMN explored INTEGER DEFAULT 0")
|
|
|
conn.commit()
|
|
|
|
|
|
|
|
|
link_queue = Queue()
|
|
|
|
|
|
|
|
|
cur.execute("SELECT url FROM visited_links WHERE explored = 0")
|
|
|
for row in cur.fetchall():
|
|
|
link_queue.put(row[0])
|
|
|
|
|
|
|
|
|
if link_queue.empty():
|
|
|
link_queue.put(START_URL)
|
|
|
|
|
|
lock = threading.Lock()
|
|
|
|
|
|
def save_article(title, content):
|
|
|
"""Enregistre l'article sous forme de fichier texte."""
|
|
|
filename = f"{title}.txt".replace("/", "_")
|
|
|
filepath = os.path.join(ARCHIVE_FOLDER, filename)
|
|
|
with open(filepath, "w", encoding="utf-8") as file:
|
|
|
file.write(content)
|
|
|
|
|
|
def is_visited(url):
|
|
|
"""Vérifie si le lien a déjà été visité."""
|
|
|
with lock:
|
|
|
cur.execute("SELECT 1 FROM visited_links WHERE url = ?", (url,))
|
|
|
return cur.fetchone() is not None
|
|
|
|
|
|
def mark_as_visited(url):
|
|
|
"""Ajoute le lien à la base de données."""
|
|
|
with lock:
|
|
|
cur.execute("INSERT OR IGNORE INTO visited_links (url) VALUES (?)", (url,))
|
|
|
conn.commit()
|
|
|
|
|
|
def mark_as_explored(url):
|
|
|
"""Marque un lien comme entièrement exploré."""
|
|
|
with lock:
|
|
|
cur.execute("UPDATE visited_links SET explored = 1 WHERE url = ?", (url,))
|
|
|
conn.commit()
|
|
|
|
|
|
def crawl():
|
|
|
"""Crawl une page Wikipédia pour extraire des liens et le contenu."""
|
|
|
while not link_queue.empty():
|
|
|
url = link_queue.get()
|
|
|
if is_visited(url) and not link_queue.empty():
|
|
|
link_queue.task_done()
|
|
|
continue
|
|
|
|
|
|
try:
|
|
|
print(f"Crawling: {url}")
|
|
|
response = requests.get(url)
|
|
|
response.raise_for_status()
|
|
|
soup = BeautifulSoup(response.text, "html.parser")
|
|
|
|
|
|
|
|
|
title = soup.find("h1", {"id": "firstHeading"}).text
|
|
|
paragraphs = soup.find_all("p")
|
|
|
content = "\n\n".join(p.text.strip() for p in paragraphs if p.text.strip())
|
|
|
|
|
|
|
|
|
if content:
|
|
|
save_article(title, content)
|
|
|
mark_as_visited(url)
|
|
|
|
|
|
|
|
|
links = set()
|
|
|
for a_tag in soup.find_all("a", href=True):
|
|
|
href = a_tag["href"]
|
|
|
if href.startswith("/wiki/") and not any(prefix in href for prefix in [":", "#"]):
|
|
|
full_url = BASE_URL + href
|
|
|
if not is_visited(full_url):
|
|
|
links.add(full_url)
|
|
|
|
|
|
|
|
|
for link in links:
|
|
|
link_queue.put(link)
|
|
|
|
|
|
|
|
|
mark_as_explored(url)
|
|
|
|
|
|
except Exception as e:
|
|
|
print(f"Erreur lors du crawl de {url}: {e}")
|
|
|
|
|
|
finally:
|
|
|
link_queue.task_done()
|
|
|
|
|
|
def main():
|
|
|
|
|
|
threads = []
|
|
|
for _ in range(THREAD_COUNT):
|
|
|
t = threading.Thread(target=crawl)
|
|
|
t.daemon = True
|
|
|
threads.append(t)
|
|
|
t.start()
|
|
|
|
|
|
|
|
|
link_queue.join()
|
|
|
for t in threads:
|
|
|
t.join()
|
|
|
|
|
|
if __name__ == "__main__":
|
|
|
main()
|
|
|
|