beshiribrahim committed · verified · Commit c949405 · 1 Parent(s): 26bc207

Update README.md

Files changed (1): README.md +206 -1
README.md CHANGED
@@ -66,4 +66,209 @@ dataset = load_dataset("your_username/tigre_wikipedia")
 
  # Example: Accessing the first article
  print(dataset['train'][0]['title'])
- print(dataset['train'][0]['text'][:500] + "...")
+ print(dataset['train'][0]['text'][:500] + "...")
+
+
+ ## ⚙️ Data Generation Pipeline (Reproducibility)
+
+ The data in this repository is generated automatically from the raw Wikimedia dumps, so it always reflects the latest, cleanest version of the wiki. The pipeline is built almost entirely on **standard Python libraries** (plus `requests` and `beautifulsoup4` for downloading) to maximize stability and compatibility across environments.
+
+ ### Processing Method
+
+ The corpus is created by a custom Python script (shown in full below) that follows a resource-efficient **streaming (SAX) approach**:
+
+ 1. **Source:** The raw XML dump (`tigwiki-*-pages-articles-multistream.xml.bz2`) is downloaded from the official Wikimedia dumps repository.
+ 2. **Extraction:** A **SAX (Simple API for XML) parser** reads the BZ2-compressed file as a stream, chunk by chunk. This is crucial for handling large files efficiently, because the dump never has to be loaded into memory in one piece.
+ 3. **Cleaning/Filtering:** The parser extracts each article's ID, title, and text content. Pages outside the main namespace (e.g., "User:", "Talk:", "Template:") are filtered out by checking for a colon (`:`) in the title.
+ 4. **Output:** The clean text is written directly into the final compressed **JSON Lines (`.jsonl.gz`)** file, one article per line (a minimal reading example follows this list).
+
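+ Each line of the packaged file is one JSON object with `id`, `title`, and `text` fields (the field names used by the script below). As a minimal sketch, assuming `tigre_wikipedia.jsonl.gz` is in the working directory, the output can be inspected with the standard library alone:
+
+ ```python
+ import gzip
+ import json
+
+ # Peek at the first record of the packaged corpus (fields: id, title, text)
+ with gzip.open("tigre_wikipedia.jsonl.gz", "rt", encoding="utf-8") as f:
+     first = json.loads(next(f))
+
+ print(first["id"], first["title"])
+ print(first["text"][:200])
+ ```
+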
+ ### Obtain the Latest Corpus (Automation Script)
+
+ Volunteers are continuously adding new content to tig.wikipedia.org. If you need the latest version of the corpus as a single `jsonl.gz` file containing the entire content, run the script below.
+
+ **1. Install Prerequisites:**
+ ```bash
+ pip install requests beautifulsoup4
+ ```
+
+ **2. Run the script:**
+ ```python
+ import requests
+ from bs4 import BeautifulSoup
+ import os
+ import gzip
+ import shutil
+ import logging
+ import json
+ import bz2
+ import xml.sax
+
+ # --- Configuration (Tigre - tigwiki) ---
+ WIKI_CODE = 'tigwiki'
+ BASE_URL = f'https://dumps.wikimedia.org/{WIKI_CODE}/'
+ FILE_PATTERN = f'{WIKI_CODE}-*-pages-articles-multistream.xml.bz2'
+ OUTPUT_DIR = 'tigre_raw_dump'
+ FINAL_FILENAME = 'tigre_wikipedia.jsonl.gz'
+ # ---------------------
+
+ logging.basicConfig(level=logging.INFO,
+                     format='%(asctime)s - %(levelname)s - %(message)s')
+
+ # --- 1. XML Handler Class ---
+
+ class WikiDumpHandler(xml.sax.ContentHandler):
+     """
+     SAX handler to parse Wikipedia XML and extract title and text.
+     SAX is used because it's a streaming parser, ideal for huge files.
+     """
+     def __init__(self, output_file):
+         super().__init__()
+         self.output_file = output_file
+         self.current_tag = ""
+         self.in_page = False
+         self.in_revision = False
+         self.in_text = False
+         self.in_title = False
+         self.current_title = ""
+         self.current_text = ""
+         self.current_id = ""
+         self.article_count = 0
+         self.logging_interval = 500  # Log every 500 articles
+
+     def startElement(self, name, attrs):
+         self.current_tag = name
+         if name == "page":
+             self.in_page = True
+             self.current_title = ""
+             self.current_id = ""
+         elif name == "revision":
+             self.in_revision = True
+         elif name == "text":
+             self.in_text = True
+             self.current_text = ""
+         elif name == "title":
+             self.in_title = True
+
+     def characters(self, content):
+         if self.in_page:
+             if self.in_title:
+                 self.current_title += content
+             elif self.current_tag == "id" and not self.current_id:
+                 self.current_id = content
+             elif self.in_revision and self.in_text:
+                 self.current_text += content
+
+     def endElement(self, name):
+         if name == "text":
+             self.in_text = False
+         elif name == "title":
+             self.in_title = False
+         elif name == "revision":
+             self.in_revision = False
+         elif name == "page":
+             self.in_page = False
+             # Filter: check for text existence and skip non-main namespaces (e.g., Talk:, User:)
+             if self.current_text and ":" not in self.current_title:
+                 self.write_article()
+
+             self.current_title = ""
+             self.current_text = ""
+             self.current_id = ""
+
+     def write_article(self):
+         cleaned_text = self.current_text.strip()
+
+         data = {
+             "id": self.current_id.strip(),
+             "title": self.current_title.strip(),
+             "text": cleaned_text
+         }
+
+         self.output_file.write(json.dumps(data, ensure_ascii=False) + '\n')
+         self.article_count += 1
+
+         if self.article_count % self.logging_interval == 0:
+             logging.info(f"Processed {self.article_count} articles...")
+
+ # --- 2. Downloader and URL Finder ---
+
+ def get_latest_dump_url():
+     logging.info(f"Checking for latest dump date at: {BASE_URL}")
+
+     try:
+         response = requests.get(BASE_URL)
+         response.raise_for_status()
+     except requests.exceptions.RequestException as e:
+         logging.error(f"Failed to access Wikimedia dumps: {e}")
+         return None
+
+     soup = BeautifulSoup(response.content, 'html.parser')
+     date_links = []
+
+     for link in soup.find_all('a'):
+         href = link.get('href')
+         if href and href.endswith('/') and href.strip('/').isdigit() and len(href.strip('/')) == 8:
+             date_links.append(href.strip('/'))
+
+     if not date_links:
+         logging.error("Could not find any date folders. Aborting.")
+         return None
+
+     latest_date = max(date_links)
+     logging.info(f"Latest dump date found: {latest_date}")
+
+     latest_file_url = f'{BASE_URL}{latest_date}/{FILE_PATTERN.replace("*", latest_date)}'
+     logging.info(f"Final dump URL: {latest_file_url}")
+     return latest_file_url
+
+ def download_file(url, target_path):
+     logging.info(f"Starting download to {target_path}...")
+     try:
+         with requests.get(url, stream=True) as r:
+             r.raise_for_status()
+             with open(target_path, 'wb') as f:
+                 shutil.copyfileobj(r.raw, f)
+         logging.info("Download complete.")
+         return True
+     except requests.exceptions.RequestException as e:
+         logging.error(f"Download failed: {e}")
+         return False
+
+ # --- 3. Main Processing Function ---
+
+ def process_and_package_sax(dump_path):
+     logging.info("Starting SAX parsing and packaging...")
+
+     try:
+         with gzip.open(FINAL_FILENAME, 'wt', encoding='utf-8') as outfile:
+             with bz2.open(dump_path, 'rt', encoding='utf-8') as infile:
+                 parser = xml.sax.make_parser()
+                 handler = WikiDumpHandler(outfile)
+                 parser.setContentHandler(handler)
+
+                 parser.parse(infile)
+
+         logging.info(f"Successfully created final package: {FINAL_FILENAME} containing {handler.article_count} articles.")
+         return True
+     except Exception as e:
+         logging.error(f"Error during SAX processing: {e}")
+         return False
+
+
+ def main():
+     dump_url = get_latest_dump_url()
+     if not dump_url:
+         return
+
+     # Ensure the download directory exists before saving the raw dump
+     os.makedirs(OUTPUT_DIR, exist_ok=True)
+     raw_dump_path = os.path.join(OUTPUT_DIR, os.path.basename(dump_url))
+
+     if not download_file(dump_url, raw_dump_path):
+         return
+
+     if not process_and_package_sax(raw_dump_path):
+         return
+
+     shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
+     logging.info("--- ✅ Process finished successfully! ---")
+
+ if __name__ == '__main__':
+     main()
+ ```
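+
+ Once the script finishes, the resulting `tigre_wikipedia.jsonl.gz` can also be loaded locally with the same 🤗 `datasets` library used above. A minimal sketch (the generic `json` loader and the `train` split name are assumptions for local use, not part of the script itself):
+
+ ```python
+ from datasets import load_dataset
+
+ # Load the locally generated corpus; each JSON line becomes one example
+ local_dataset = load_dataset("json", data_files="tigre_wikipedia.jsonl.gz", split="train")
+
+ print(local_dataset)                  # features: id, title, text
+ print(local_dataset[0]["title"])
+ ```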