| import requests
|
| from bs4 import BeautifulSoup
|
| import os
|
| import gzip
|
| import shutil
|
| import logging
|
| import json
|
| import bz2
|
| import xml.sax
|
|
|
|
|
# Wikimedia project code for the Tigre-language Wikipedia.
WIKI_CODE = 'tigwiki'

# Index page listing the dated dump folders (YYYYMMDD/) for this wiki.
BASE_URL = f'https://dumps.wikimedia.org/{WIKI_CODE}/'

# Dump filename pattern; '*' is replaced by the dump date (YYYYMMDD).
FILE_PATTERN = f'{WIKI_CODE}-*-pages-articles-multistream.xml.bz2'

# Scratch directory for the downloaded raw dump (deleted after processing).
OUTPUT_DIR = 'tigre_raw_dump'

# Final gzipped JSON-lines package produced by this script.
FINAL_FILENAME = 'tigre_wikipedia.jsonl.gz'

# Timestamped INFO-level logging for progress reporting.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')
|
|
|
class WikiDumpHandler(xml.sax.ContentHandler):
    """
    SAX handler that streams a Wikipedia XML dump and writes one JSON line
    per main-namespace article to *output_file*.

    SAX is used because it's a streaming parser, ideal for huge files that
    would not fit in memory with a DOM parser.
    """

    def __init__(self, output_file):
        super().__init__()
        self.output_file = output_file   # writable text handle for JSONL output
        self.current_tag = ""            # most recently opened element name
        self.in_page = False
        self.in_revision = False
        self.in_text = False
        self.in_title = False
        self.in_page_id = False          # inside the page-level <id> element
        self.current_title = ""
        self.current_text = ""
        self.current_id = ""
        self.article_count = 0
        self.logging_interval = 500      # log progress every N articles

    def startElement(self, name, attrs):
        """Track the element being entered and reset per-page buffers."""
        self.current_tag = name
        if name == "page":
            self.in_page = True
            self.current_title = ""
            self.current_id = ""
        elif name == "revision":
            self.in_revision = True
        elif name == "text":
            self.in_text = True
            self.current_text = ""
        elif name == "title":
            self.in_title = True
        elif name == "id" and self.in_page and not self.in_revision and not self.current_id:
            # Only the first page-level <id> is the article id; <revision> and
            # <contributor> contain their own <id> elements, which are skipped
            # by the in_revision check above.
            self.in_page_id = True

    def characters(self, content):
        """
        Accumulate character data for the element currently open.

        SAX may deliver the text of a single element in several chunks, so
        every branch appends (+=) rather than assigns.  The previous
        `not self.current_id` guard here kept only the FIRST chunk of the
        page id; the in_page_id flag set in startElement fixes that.
        """
        if self.in_page:
            if self.in_title:
                self.current_title += content
            elif self.in_page_id:
                self.current_id += content
            elif self.in_revision and self.in_text:
                self.current_text += content

    def endElement(self, name):
        """Clear element state; on </page>, emit the buffered article."""
        if name == "text":
            self.in_text = False
        elif name == "title":
            self.in_title = False
        elif name == "id":
            self.in_page_id = False
        elif name == "revision":
            self.in_revision = False
        elif name == "page":
            self.in_page = False
            # Skip empty pages and non-article namespaces -- titles such as
            # "Category:..." or "Template:..." contain a colon.  (This also
            # drops main-namespace titles that legitimately contain ':'.)
            if self.current_text and ":" not in self.current_title:
                self.write_article()
            self.current_title = ""
            self.current_text = ""
            self.current_id = ""

    def write_article(self):
        """Write the buffered article as one JSON line and log progress."""
        cleaned_text = self.current_text.strip()

        data = {
            "id": self.current_id.strip(),
            "title": self.current_title.strip(),
            "text": cleaned_text
        }

        # ensure_ascii=False keeps the Ge'ez-script text readable in the output.
        self.output_file.write(json.dumps(data, ensure_ascii=False) + '\n')
        self.article_count += 1

        if self.article_count % self.logging_interval == 0:
            logging.info(f"Processed {self.article_count} articles...")
|
|
|
def get_latest_dump_url():
    """
    Scrape the Wikimedia dumps index for WIKI_CODE and return the URL of the
    most recent pages-articles-multistream dump, or None on any failure.
    """
    logging.info(f"Checking for latest dump date at: {BASE_URL}")

    try:
        # A timeout prevents the script from hanging forever on a stalled
        # connection (requests has no default timeout).
        response = requests.get(BASE_URL, timeout=30)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        logging.error(f"Failed to access Wikimedia dumps: {e}")
        return None

    soup = BeautifulSoup(response.content, 'html.parser')
    date_links = []

    # Dump folders are named YYYYMMDD/ -- collect every 8-digit directory link.
    for link in soup.find_all('a'):
        href = link.get('href')
        if href and href.endswith('/') and href.strip('/').isdigit() and len(href.strip('/')) == 8:
            date_links.append(href.strip('/'))

    if not date_links:
        logging.error("Could not find any date folders. Aborting.")
        return None

    # YYYYMMDD strings sort lexicographically in chronological order.
    latest_date = max(date_links)
    logging.info(f"Latest dump date found: {latest_date}")

    latest_file_url = f'{BASE_URL}{latest_date}/{FILE_PATTERN.replace("*", latest_date)}'
    logging.info(f"Final dump URL: {latest_file_url}")
    return latest_file_url
|
|
|
def download_file(url, target_path):
    """
    Stream *url* to *target_path* in chunks.

    Returns True on success, False on any request/HTTP error.
    """
    logging.info(f"Starting download to {target_path}...")
    try:
        # stream=True + iter_content keeps memory flat for the multi-MB dump
        # and, unlike r.raw, transparently handles any transfer-encoding.
        # timeout bounds how long we wait for the connection/each chunk.
        with requests.get(url, stream=True, timeout=60) as r:
            r.raise_for_status()
            with open(target_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1 << 20):
                    f.write(chunk)
        logging.info("Download complete.")
        return True
    except requests.exceptions.RequestException as e:
        logging.error(f"Download failed: {e}")
        return False
|
|
|
def process_and_package_sax(dump_path):
    """
    Stream-decompress the .bz2 dump at *dump_path*, SAX-parse it, and write
    the extracted articles to FINAL_FILENAME as gzipped JSON lines.

    Returns True on success, False if decompression, parsing or I/O fails.
    """
    logging.info("Starting SAX parsing and packaging...")

    try:
        # Both files are streamed: bz2 decompresses lazily, gzip compresses
        # incrementally, so memory use stays flat regardless of dump size.
        with gzip.open(FINAL_FILENAME, 'wt', encoding='utf-8') as outfile, \
                bz2.open(dump_path, 'rt', encoding='utf-8') as infile:

            parser = xml.sax.make_parser()
            handler = WikiDumpHandler(outfile)
            parser.setContentHandler(handler)

            parser.parse(infile)

            logging.info(f"Successfully created final package: **{FINAL_FILENAME}** containing {handler.article_count} articles.")
        return True
    except Exception as e:
        # Top-level boundary: logging.exception records the full traceback,
        # not just the message, which the previous logging.error lost.
        logging.exception(f"Error during SAX processing: {e}")
        return False
|
|
|
def main():
    """Orchestrate the pipeline: locate dump, download, process, clean up."""
    dump_url = get_latest_dump_url()
    if not dump_url:
        return

    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() / os.makedirs() pair.
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    raw_dump_path = os.path.join(OUTPUT_DIR, os.path.basename(dump_url))

    if not download_file(dump_url, raw_dump_path):
        return

    if not process_and_package_sax(raw_dump_path):
        return

    # The raw .bz2 dump is no longer needed once the .jsonl.gz exists.
    shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
    logging.info("--- ✅ Process finished successfully! ---")
|
|
|
# Run the pipeline only when executed as a script, not when imported.
if __name__ == '__main__':
    main()