Update README.md
README.md
CHANGED
@@ -1,274 +1,136 @@

---
license: cc-by-sa-4.0
tags:
- wikipedia
- tigre
- tig
- corpus
- machine learning
- low-resource
language:
- tig
---
* Tourism
* Art and Culture
* Politics
* Books
* Science and Technology
* History
* Health
* Biography
* Miscellaneous
* Sport
* Fun
* Reference
* Tigre Language
* Food and Drinks
* Pictures

### Data Fields

| Field | Type | Description |
| :--- | :--- | :--- |
| **id** | `string` | The unique ID of the Wikipedia article. |
| **title** | `string` | The title of the article (e.g., "History of Eritrea"). |
| **text** | `string` | The **cleaned, plain text content** of the article (Wikitext and metadata removed). |

2. **Extraction:** A **SAX (Simple API for XML) streaming parser** is used to read the highly compressed BZ2 file chunk by chunk. This is crucial for handling large files efficiently without requiring the entire dataset to be loaded into memory.
3. **Cleaning/Filtering:** The custom parser extracts the article ID, title, and text content. Articles belonging to non-main namespaces (e.g., "User:", "Talk:", "Template:") are automatically filtered out based on the presence of a colon (`:`) in the title.
4. **Output:** The clean text is written directly into the final compressed **JSON Lines (`.jsonl.gz`)** file.

**1. Install Prerequisites:**
```bash
pip install requests beautifulsoup4
```

```python
import bz2
import gzip
import json
import logging
import os
import shutil
import xml.sax

import requests
from bs4 import BeautifulSoup

logging.basicConfig(level=logging.INFO)

# Configuration (placeholder values; point these at the tigwiki dump index and desired output paths)
BASE_URL = "https://dumps.wikimedia.org/tigwiki/"
FILE_PATTERN = "tigwiki-*-pages-articles.xml.bz2"  # "*" is replaced by the dump date
OUTPUT_DIR = "dump_download"
FINAL_FILENAME = "tigre_wikipedia.jsonl.gz"

os.makedirs(OUTPUT_DIR, exist_ok=True)


class WikiDumpHandler(xml.sax.ContentHandler):
"""
|
| 119 |
-
SAX handler to parse Wikipedia XML and extract title and text.
|
| 120 |
-
SAX is used because it's a streaming parser, ideal for huge files.
|
| 121 |
-
"""
|
| 122 |
-
def __init__(self, output_file):
|
| 123 |
-
super().__init__()
|
| 124 |
-
self.output_file = output_file
|
| 125 |
-
self.current_tag = ""
|
| 126 |
-
self.in_page = False
|
| 127 |
-
self.in_revision = False
|
| 128 |
-
self.in_text = False
|
| 129 |
-
self.in_title = False
|
| 130 |
-
self.current_title = ""
|
| 131 |
-
self.current_text = ""
|
| 132 |
-
self.current_id = ""
|
| 133 |
-
self.article_count = 0
|
| 134 |
-
self.logging_interval = 500 # Log every 500 articles
|
| 135 |
-
|
| 136 |
-
def startElement(self, name, attrs):
|
| 137 |
-
self.current_tag = name
|
| 138 |
-
if name == "page":
|
| 139 |
-
self.in_page = True
|
| 140 |
-
self.current_title = ""
|
| 141 |
-
self.current_id = ""
|
| 142 |
-
elif name == "revision":
|
| 143 |
-
self.in_revision = True
|
| 144 |
-
elif name == "text":
|
| 145 |
-
self.in_text = True
|
| 146 |
-
self.current_text = ""
|
| 147 |
-
elif name == "title":
|
| 148 |
-
self.in_title = True
|
| 149 |
-
|
| 150 |
-
def characters(self, content):
|
| 151 |
-
if self.in_page:
|
| 152 |
-
if self.in_title:
|
| 153 |
-
self.current_title += content
|
| 154 |
-
elif self.current_tag == "id" and not self.current_id:
|
| 155 |
-
self.current_id = content
|
| 156 |
-
elif self.in_revision and self.in_text:
|
| 157 |
-
self.current_text += content
|
| 158 |
-
|
| 159 |
-
def endElement(self, name):
|
| 160 |
-
if name == "text":
|
| 161 |
-
self.in_text = False
|
| 162 |
-
elif name == "title":
|
| 163 |
-
self.in_title = False
|
| 164 |
-
elif name == "revision":
|
| 165 |
-
self.in_revision = False
|
| 166 |
-
elif name == "page":
|
| 167 |
-
self.in_page = False
|
| 168 |
-
# Filter: Check for text existence and skip non-main namespaces (e.g., Talk:, User:)
|
| 169 |
-
if self.current_text and ":" not in self.current_title:
|
| 170 |
-
self.write_article()
|
| 171 |
-
|
| 172 |
-
self.current_title = ""
|
| 173 |
-
self.current_text = ""
|
| 174 |
-
self.current_id = ""
|
| 175 |
-
|
| 176 |
-
def write_article(self):
|
| 177 |
-
cleaned_text = self.current_text.strip()
|
| 178 |
-
|
| 179 |
-
data = {
|
| 180 |
-
"id": self.current_id.strip(),
|
| 181 |
-
"title": self.current_title.strip(),
|
| 182 |
-
"text": cleaned_text
|
| 183 |
-
}
|
| 184 |
-
|
| 185 |
-
self.output_file.write(json.dumps(data, ensure_ascii=False) + '\n')
|
| 186 |
-
self.article_count += 1
|
| 187 |
-
|
| 188 |
-
if self.article_count % self.logging_interval == 0:
|
| 189 |
-
logging.info(f"Processed {self.article_count} articles...")
|
| 190 |
-
|
| 191 |
-
# --- 2. Downloader and URL Finder ---
|
| 192 |
-
|
| 193 |
-
def get_latest_dump_url():
|
| 194 |
-
logging.info(f"Checking for latest dump date at: {BASE_URL}")
|
| 195 |
-
|
| 196 |
-
try:
|
| 197 |
-
response = requests.get(BASE_URL)
|
| 198 |
-
response.raise_for_status()
|
| 199 |
-
except requests.exceptions.RequestException as e:
|
| 200 |
-
logging.error(f"Failed to access Wikimedia dumps: {e}")
|
| 201 |
-
return None
|
| 202 |
-
|
| 203 |
-
soup = BeautifulSoup(response.content, 'html.parser')
|
| 204 |
-
date_links = []
|
| 205 |
-
|
| 206 |
-
for link in soup.find_all('a'):
|
| 207 |
-
href = link.get('href')
|
| 208 |
-
if href and href.endswith('/') and href.strip('/').isdigit() and len(href.strip('/')) == 8:
|
| 209 |
-
date_links.append(href.strip('/'))
|
| 210 |
-
|
| 211 |
-
if not date_links:
|
| 212 |
-
logging.error("Could not find any date folders. Aborting.")
|
| 213 |
-
return None
|
| 214 |
-
|
| 215 |
-
latest_date = max(date_links)
|
| 216 |
-
logging.info(f"Latest dump date found: {latest_date}")
|
| 217 |
-
|
| 218 |
-
latest_file_url = f'{BASE_URL}{latest_date}/{FILE_PATTERN.replace("*", latest_date)}'
|
| 219 |
-
logging.info(f"Final dump URL: {latest_file_url}")
|
| 220 |
-
return latest_file_url
|
| 221 |
-
|
| 222 |
-
def download_file(url, target_path):
|
| 223 |
-
logging.info(f"Starting download to {target_path}...")
|
| 224 |
-
try:
|
| 225 |
-
with requests.get(url, stream=True) as r:
|
| 226 |
-
r.raise_for_status()
|
| 227 |
-
with open(target_path, 'wb') as f:
|
| 228 |
-
shutil.copyfileobj(r.raw, f)
|
| 229 |
-
logging.info("Download complete.")
|
| 230 |
-
return True
|
| 231 |
-
except requests.exceptions.RequestException as e:
|
| 232 |
-
logging.error(f"Download failed: {e}")
|
| 233 |
-
return False
|
| 234 |
-
|
| 235 |
-
# --- 3. Main Processing Function ---
|
| 236 |
-
|
| 237 |
-
def process_and_package_sax(dump_path):
|
| 238 |
-
logging.info("Starting SAX parsing and packaging...")
|
| 239 |
-
|
| 240 |
-
try:
|
| 241 |
-
with gzip.open(FINAL_FILENAME, 'wt', encoding='utf-8') as outfile:
|
| 242 |
-
with bz2.open(dump_path, 'rt', encoding='utf-8') as infile:
|
| 243 |
-
|
| 244 |
-
parser = xml.sax.make_parser()
|
| 245 |
-
handler = WikiDumpHandler(outfile)
|
| 246 |
-
parser.setContentHandler(handler)
|
| 247 |
-
|
| 248 |
-
parser.parse(infile)
|
| 249 |
-
|
| 250 |
-
logging.info(f"Successfully created final package: **{FINAL_FILENAME}** containing {handler.article_count} articles.")
|
| 251 |
-
return True
|
| 252 |
-
except Exception as e:
|
| 253 |
-
logging.error(f"Error during SAX processing: {e}")
|
| 254 |
-
return False
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
def main():
|
| 258 |
-
dump_url = get_latest_dump_url()
|
| 259 |
-
if not dump_url:
|
| 260 |
-
return
|
| 261 |
-
|
| 262 |
-
raw_dump_path = os.path.join(OUTPUT_DIR, os.path.basename(dump_url))
|
| 263 |
-
|
| 264 |
-
if not download_file(dump_url, raw_dump_path):
|
| 265 |
-
return
|
| 266 |
-
|
| 267 |
-
if not process_and_package_sax(raw_dump_path):
|
| 268 |
-
return
|
| 269 |
-
|
| 270 |
-
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
|
| 271 |
-
logging.info("--- ✅ Process finished successfully! ---")
|
| 272 |
-
|
| 273 |
-
if __name__ == '__main__':
|
| 274 |
-
main()
|
|
|
|

---
license: cc-by-sa-4.0
pretty_name: Tigre Wikipedia Corpus
tags:
- wikipedia
- tigre
- tig
- corpus
- machine learning
- low-resource
language:
- tig
---

# Tigre Wikipedia Corpus (tigwiki)

### Overview

This repository houses the **Tigre Wikipedia Corpus**, a foundational linguistic resource containing all non-template articles from **https://tig.wikipedia.org**.

**Tigre** is an under-resourced South Semitic language within the Afro-Asiatic family. This dataset serves as a critical resource for bridging the digital divide, facilitating the development of Natural Language Processing (NLP) models, including language models (LMs), machine translation (MT) systems, and text generation tools, tailored specifically for the Tigre community.

### Background & Scope

The Tigre language Wikipedia was officially **approved and launched in December 2024**, marking a significant milestone for the language's digital presence. This corpus represents the collective effort of the **Tigre diaspora community**, with contributions from over twenty dedicated volunteers who actively build and maintain this growing digital knowledge base.

---

## Included Data & Coverage

### **Data Modalities**

This repository contains **Monolingual Text** extracted directly from Wikipedia articles.

### **Domain Coverage**

The corpus encompasses a diverse range of topics, reflecting the broad scope of the encyclopedia. Key sections include:

- **Culture & Heritage:** Art, Culture, Food, and Drinks
- **Geography & Travel:** "Let's explore our country," Tourism
- **STEM:** Science, Technology, Health
- **Humanities:** History, Politics, Biography, Literature (Books)
- **General Interest:** Sport, Entertainment ("Fun"), Miscellaneous

---

## Dataset Structure

The corpus is provided as a single compressed JSON Lines file (`tigre_wikipedia.jsonl.gz`). This format is efficient to stream and works with standard NLP libraries.

```
tigre-data-wikipedia/
├── README.md
├── build_corpus.py
└── tigre_wikipedia.jsonl.gz
```

### **Data Fields**

| Field | Type   | Description                 |
|-------|--------|-----------------------------|
| id    | string | Unique ID of the article    |
| title | string | Article title               |
| text  | string | Cleaned plain-text content  |
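
If you prefer not to go through the `datasets` loader, the records can also be read directly with the Python standard library. This is a minimal sketch; it assumes the file has been downloaded locally as `tigre_wikipedia.jsonl.gz`.

```python
import gzip
import json

# Stream the compressed JSON Lines file one article at a time.
with gzip.open("tigre_wikipedia.jsonl.gz", "rt", encoding="utf-8") as f:
    for line in f:
        article = json.loads(line)
        # Each record exposes the three fields described above.
        print(article["id"], article["title"], article["text"][:80])
        break  # remove this to iterate over the full corpus
```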

---

## Data Provenance & Methodology

### **Data Generation Pipeline**

The data is generated from official Wikimedia XML dumps using a custom SAX-based streaming parser (see the sketch after this list). Steps include:

1. **Source:** Downloads the latest raw XML dump.
2. **Extraction:** Streams the compressed dump without loading it fully into memory.
3. **Filtering:** Removes non-main namespaces (User:, Talk:, Template:).
4. **Output:** Serializes clean text into JSONL format.

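The full implementation lives in `build_corpus.py`; the fragment below is only an illustrative sketch of the filtering step, using the colon-in-title test described in step 3. The class name `NamespaceFilter` is made up for this example.

```python
import xml.sax

class NamespaceFilter(xml.sax.ContentHandler):
    """Collects (title, text) pairs for main-namespace pages only."""

    def __init__(self):
        super().__init__()
        self.in_title = False
        self.in_text = False
        self.title = ""
        self.text = ""
        self.articles = []

    def startElement(self, name, attrs):
        if name == "page":
            self.title, self.text = "", ""
        elif name == "title":
            self.in_title = True
        elif name == "text":
            self.in_text = True

    def characters(self, content):
        if self.in_title:
            self.title += content
        elif self.in_text:
            self.text += content

    def endElement(self, name):
        if name == "title":
            self.in_title = False
        elif name == "text":
            self.in_text = False
        elif name == "page":
            # A colon in the title marks a non-main namespace (e.g. "Talk:").
            if self.text and ":" not in self.title:
                self.articles.append((self.title.strip(), self.text.strip()))
```

A dump can then be parsed with `xml.sax.parse(bz2.open(dump_path, "rt"), handler)`, after which `handler.articles` holds only encyclopedia articles.
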
---

## Bias, Risks & Known Limitations

- **Community Bias:** Overrepresentation of diaspora-relevant topics.
- **Domain Bias:** Overemphasis on formal encyclopedic style.
- **Size Limitations:** Because this Wikipedia launched in December 2024, its content volume is still growing.

---

## How to Use

### **Loading via Hugging Face**

```python
from datasets import load_dataset

dataset = load_dataset("BeitTigreAI/tigre-data-wikipedia", split="train")

print(dataset[0]["title"])
print(dataset[0]["text"][:200])
```
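
For memory-constrained environments, the standard `datasets` streaming mode avoids downloading and materializing the whole split at once:

```python
from datasets import load_dataset

# Iterate lazily instead of loading the full split into memory.
streamed = load_dataset("BeitTigreAI/tigre-data-wikipedia", split="train", streaming=True)
for example in streamed:
    print(example["title"])
    break
```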

---

## Reproducibility: Build the Latest Version

### 1. Install Requirements

```bash
pip install requests beautifulsoup4
```

### 2. Run the Script

```bash
python build_corpus.py
```

---

## Licensing

Licensed under **CC BY-SA 4.0**.

---

## Citation

```bibtex
@misc{tigre-wikipedia-corpus,
  author       = {BeitTigreAI},
  title        = {Tigre Wikipedia (tigwiki) Corpus},
  year         = {2025},
  publisher    = {Hugging Face},
  howpublished = {https://huggingface.co/datasets/BeitTigreAI/tigre-data-wikipedia}
}
```