beshiribrahim committed
Commit 112b8fd · verified · 1 Parent(s): c949405

Upload build_corpus.py

Files changed (1):
  1. build_corpus.py +177 -0
build_corpus.py ADDED
@@ -0,0 +1,177 @@
import requests
from bs4 import BeautifulSoup
import os
import gzip
import shutil
import logging
import json
import bz2
import xml.sax

# --- Configuration (Tigre - tigwiki) ---
WIKI_CODE = 'tigwiki'
BASE_URL = f'https://dumps.wikimedia.org/{WIKI_CODE}/'
FILE_PATTERN = f'{WIKI_CODE}-*-pages-articles-multistream.xml.bz2'
OUTPUT_DIR = 'tigre_raw_dump'
FINAL_FILENAME = 'tigre_wikipedia.jsonl.gz'
# ---------------------

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')

class WikiDumpHandler(xml.sax.ContentHandler):
    """
    SAX handler to parse Wikipedia XML and extract title and text.
    SAX is used because it's a streaming parser, ideal for huge files.
    """
    def __init__(self, output_file):
        super().__init__()
        self.output_file = output_file
        self.current_tag = ""
        self.in_page = False
        self.in_revision = False
        self.in_text = False
        self.in_title = False
        self.current_title = ""
        self.current_text = ""
        self.current_id = ""
        self.article_count = 0
        self.logging_interval = 500  # Log every 500 articles

    def startElement(self, name, attrs):
        self.current_tag = name
        if name == "page":
            self.in_page = True
            self.current_title = ""
            self.current_id = ""
        elif name == "revision":
            self.in_revision = True
        elif name == "text":
            self.in_text = True
            self.current_text = ""
        elif name == "title":
            self.in_title = True

    def characters(self, content):
        if self.in_page:
            if self.in_title:
                self.current_title += content
            elif self.current_tag == "id" and not self.current_id:
                self.current_id = content
            elif self.in_revision and self.in_text:
                self.current_text += content

    def endElement(self, name):
        if name == "text":
            self.in_text = False
        elif name == "title":
            self.in_title = False
        elif name == "revision":
            self.in_revision = False
        elif name == "page":
            self.in_page = False
            # Filter: Check for text existence and skip non-main namespaces (e.g., Talk:, User:)
            if self.current_text and ":" not in self.current_title:
                self.write_article()

            self.current_title = ""
            self.current_text = ""
            self.current_id = ""

    def write_article(self):
        cleaned_text = self.current_text.strip()

        data = {
            "id": self.current_id.strip(),
            "title": self.current_title.strip(),
            "text": cleaned_text
        }

        self.output_file.write(json.dumps(data, ensure_ascii=False) + '\n')
        self.article_count += 1

        if self.article_count % self.logging_interval == 0:
            logging.info(f"Processed {self.article_count} articles...")

def get_latest_dump_url():
    logging.info(f"Checking for latest dump date at: {BASE_URL}")

    try:
        response = requests.get(BASE_URL)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        logging.error(f"Failed to access Wikimedia dumps: {e}")
        return None

    soup = BeautifulSoup(response.content, 'html.parser')
    date_links = []

    for link in soup.find_all('a'):
        href = link.get('href')
        if href and href.endswith('/') and href.strip('/').isdigit() and len(href.strip('/')) == 8:
            date_links.append(href.strip('/'))

    if not date_links:
        logging.error("Could not find any date folders. Aborting.")
        return None

    latest_date = max(date_links)
    logging.info(f"Latest dump date found: {latest_date}")

    latest_file_url = f'{BASE_URL}{latest_date}/{FILE_PATTERN.replace("*", latest_date)}'
    logging.info(f"Final dump URL: {latest_file_url}")
    return latest_file_url

def download_file(url, target_path):
    logging.info(f"Starting download to {target_path}...")
    try:
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            with open(target_path, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
        logging.info("Download complete.")
        return True
    except requests.exceptions.RequestException as e:
        logging.error(f"Download failed: {e}")
        return False

def process_and_package_sax(dump_path):
    logging.info("Starting SAX parsing and packaging...")

    try:
        with gzip.open(FINAL_FILENAME, 'wt', encoding='utf-8') as outfile:
            with bz2.open(dump_path, 'rt', encoding='utf-8') as infile:

                parser = xml.sax.make_parser()
                handler = WikiDumpHandler(outfile)
                parser.setContentHandler(handler)

                parser.parse(infile)

        logging.info(f"Successfully created final package: **{FINAL_FILENAME}** containing {handler.article_count} articles.")
        return True
    except Exception as e:
        logging.error(f"Error during SAX processing: {e}")
        return False

def main():
    dump_url = get_latest_dump_url()
    if not dump_url:
        return

    if not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)

    raw_dump_path = os.path.join(OUTPUT_DIR, os.path.basename(dump_url))

    if not download_file(dump_url, raw_dump_path):
        return

    if not process_and_package_sax(raw_dump_path):
        return

    shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
    logging.info("--- ✅ Process finished successfully! ---")

if __name__ == '__main__':
    main()
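
A minimal sketch of consuming the resulting corpus, assuming the script above has already produced tigre_wikipedia.jsonl.gz in the working directory. It simply mirrors how the file is written: one gzip-compressed line per article, each a JSON object with "id", "title", and "text" (raw wikitext). The file name and field names come from the script; everything else here is illustrative.

import gzip
import json

# Iterate over the corpus one article at a time without decompressing it to disk.
with gzip.open('tigre_wikipedia.jsonl.gz', 'rt', encoding='utf-8') as f:
    for line in f:
        article = json.loads(line)
        # Each record carries the page id, the title, and the raw wikitext body.
        print(article['id'], article['title'], len(article['text']))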