# UVW-2026 / scripts/prepare_huggingface.py
# Commit a0a7929 — "Add Wikidata enrichment and quality scoring scripts" (rain1024)
# NOTE(review): the two lines above were non-Python residue from a web file
# viewer (repo path, author, commit message); converted to comments so the
# script parses.
#!/usr/bin/env python3
"""
Prepare UVW 2026 dataset for HuggingFace Hub upload.
UVW 2026: Underthesea Vietnamese Wikipedia Dataset
https://github.com/undertheseanlp/underthesea/issues/896
Uses streaming to handle large datasets efficiently.
Supports direct upload to HuggingFace Hub with --push flag.
"""
import argparse
import json
from datetime import datetime
from pathlib import Path

from tqdm import tqdm

# Optional heavy dependencies. The script degrades gracefully and prints an
# install hint ("uv sync --extra huggingface") when either one is missing.
try:
    from datasets import Dataset, DatasetDict, Features, Value
    HF_AVAILABLE = True  # `datasets` present -> dataset can be built/saved
except ImportError:
    HF_AVAILABLE = False

try:
    from huggingface_hub import HfApi, login  # NOTE(review): `login` looks unused in this file
    HF_HUB_AVAILABLE = True  # `huggingface_hub` present -> push to Hub possible
except ImportError:
    HF_HUB_AVAILABLE = False

# Repository-relative data locations (this script lives in scripts/, one level
# below the repo root).
SPLITS_DIR = Path(__file__).parent.parent / "data" / "splits"    # train/dev/test JSONL files
HF_DIR = Path(__file__).parent.parent / "data" / "huggingface"   # generated HF artifacts
def count_lines(path: Path) -> int:
    """Return the number of lines in *path*, streaming so memory stays flat."""
    with open(path, "r", encoding="utf-8") as fh:
        # Consume the file iterator lazily; never holds more than one line.
        return sum(1 for _ in fh)
def iter_jsonl(path: Path):
    """Yield one decoded JSON object per line of *path* (lazy, constant memory)."""
    with open(path, "r", encoding="utf-8") as fh:
        # json.loads per line; generator keeps only the current record alive.
        yield from map(json.loads, fh)
def create_hf_dataset():
    """Build a DatasetDict from the JSONL splits and persist it to disk.

    Saves both an arrow copy (for ``load_from_disk``) and per-split parquet
    files (for Hub upload). Returns ``(dataset, split_counts)`` on success,
    or ``None`` when the ``datasets`` library or the split files are missing.
    """
    if not HF_AVAILABLE:
        print("Cannot create HuggingFace dataset: datasets library not installed")
        print("Install with: uv sync --extra huggingface")
        return

    # Explicit schema, including the Wikidata enrichment fields.
    schema = Features({
        "id": Value("string"),
        "title": Value("string"),
        "content": Value("string"),
        "num_chars": Value("int32"),
        "num_sentences": Value("int32"),
        "quality_score": Value("int32"),
        "wikidata_id": Value("string"),
        "main_category": Value("string"),
    })

    splits = {}
    split_counts = {}
    for name in ["train", "dev", "test"]:
        jsonl_path = SPLITS_DIR / f"{name}.jsonl"
        if not jsonl_path.exists():
            continue

        # Pre-count lines so tqdm can render a real progress bar.
        total = count_lines(jsonl_path)
        split_counts[name] = total

        print(f" Loading {name}...")
        # Normalize every record: missing fields get type-appropriate defaults
        # (`or ""` also maps explicit null wikidata/category values to "").
        rows = [
            {
                "id": rec.get("id", ""),
                "title": rec.get("title", ""),
                "content": rec.get("content", ""),
                "num_chars": rec.get("num_chars", 0),
                "num_sentences": rec.get("num_sentences", 0),
                "quality_score": rec.get("quality_score", 0),
                "wikidata_id": rec.get("wikidata_id") or "",
                "main_category": rec.get("main_category") or "",
            }
            for rec in tqdm(iter_jsonl(jsonl_path), total=total, desc=f" {name}")
        ]

        # HuggingFace convention calls the dev split "validation".
        hub_name = "validation" if name == "dev" else name
        splits[hub_name] = Dataset.from_list(rows, features=schema)
        print(f" Loaded {name}: {total:,} examples")
        # Drop the Python-side copy as soon as the arrow table exists.
        del rows

    if not splits:
        print("No splits found. Please run create_splits.py first.")
        return

    dataset = DatasetDict(splits)

    # Arrow copy for local consumption via load_from_disk().
    HF_DIR.mkdir(parents=True, exist_ok=True)
    print(f"\nSaving dataset to disk...")
    dataset.save_to_disk(HF_DIR / "uvw_2026")
    print(f" Dataset saved to: {HF_DIR / 'uvw_2026'}")

    # Parquet copies are what actually get uploaded to the Hub.
    parquet_dir = HF_DIR / "uvw_2026_parquet"
    parquet_dir.mkdir(parents=True, exist_ok=True)
    print(f"\nSaving parquet files...")
    for hub_name, subset in dataset.items():
        target = parquet_dir / f"{hub_name}.parquet"
        subset.to_parquet(target)
        print(f" Saved {target}")

    return dataset, split_counts
def load_dataset_statistics():
    """Stream the most complete processed JSONL file and aggregate corpus stats.

    Returns a dict with totals, Wikidata/category coverage counters, a
    per-category histogram, and a quality-score (1-10) distribution. All
    counters stay zero when no processed file exists.
    """
    stats = {
        "total_articles": 0,
        "total_chars": 0,
        "total_sentences": 0,
        "with_wikidata": 0,
        "with_category": 0,
        "categories": {},
        "quality_distribution": {i: 0 for i in range(1, 11)},
    }

    # Prefer the wikidata-enriched file, then the quality-scored one, then the
    # plain extraction — the first existing candidate wins.
    processed_dir = Path(__file__).parent.parent / "data" / "processed"
    source = processed_dir / "uvw_2026_wikidata.jsonl"
    for fallback in ("uvw_2026_quality.jsonl", "uvw_2026.jsonl"):
        if source.exists():
            break
        source = processed_dir / fallback

    if source.exists():
        print(f" Loading statistics from {source.name}...")
        for article in tqdm(iter_jsonl(source), desc=" Calculating stats"):
            stats["total_articles"] += 1
            stats["total_chars"] += article.get("num_chars", 0)
            stats["total_sentences"] += article.get("num_sentences", 0)
            if article.get("wikidata_id"):
                stats["with_wikidata"] += 1
            cat = article.get("main_category")
            if cat:
                stats["with_category"] += 1
                stats["categories"][cat] = stats["categories"].get(cat, 0) + 1
            score = article.get("quality_score", 0)
            # Only tally scores inside the documented 1-10 range.
            if 1 <= score <= 10:
                stats["quality_distribution"][score] += 1
    return stats
def create_dataset_card(split_counts: dict = None, stats: dict = None):
    """Create README.md (the dataset card) for the HuggingFace dataset.

    Args:
        split_counts: Mapping of split name ("train"/"dev"/"test") to example
            count, as produced by create_hf_dataset(). Optional.
        stats: Aggregated corpus statistics from load_dataset_statistics().
            Optional; hard-coded placeholder figures are used when absent.

    Writes the card to HF_DIR/README.md and, when the parquet export exists,
    also to HF_DIR/uvw_2026_parquet/README.md so it ships with the upload.
    """
    # Pipeline metadata is the fallback source for the total article count.
    metadata_path = Path(__file__).parent.parent / "data" / "processed" / "metadata.json"
    if metadata_path.exists():
        with open(metadata_path, "r", encoding="utf-8") as f:
            metadata = json.load(f)
    else:
        metadata = {"statistics": {}}
    file_stats = metadata.get("statistics", {})

    # Resolve split sizes. These MUST stay ints: they are interpolated with
    # the "," thousands format spec below, which raises
    # "ValueError: Cannot specify ',' with 's'" on strings. (The previous
    # no-metadata fallback used "80%"/"10%" strings and crashed the card
    # generation at the split table.)
    if split_counts:
        total = sum(split_counts.values())
        train_count = split_counts.get("train", 0)
        val_count = split_counts.get("dev", 0)
        test_count = split_counts.get("test", 0)
    else:
        total = file_stats.get("num_articles", 0)
        train_count = int(total * 0.8)
        val_count = int(total * 0.1)
        test_count = int(total * 0.1)

    # Use computed stats when available; otherwise fall back to the published
    # figures from a previous run so the card still reads sensibly.
    if stats and stats["total_articles"] > 0:
        wikidata_pct = (stats["with_wikidata"] / stats["total_articles"] * 100)
        category_pct = (stats["with_category"] / stats["total_articles"] * 100)
        unique_categories = len(stats["categories"])
        avg_chars = stats["total_chars"] // stats["total_articles"]
        avg_sentences = stats["total_sentences"] // stats["total_articles"]
        # Top-10 categories by frequency, as markdown table rows.
        sorted_cats = sorted(stats["categories"].items(), key=lambda x: -x[1])[:10]
        top_categories_table = "\n".join(
            f"| {cat} | {count:,} | {count/stats['total_articles']*100:.1f}% |"
            for cat, count in sorted_cats
        )
        # Quality-score distribution rows (non-empty buckets only).
        quality_table = "\n".join(
            f"| {score} | {count:,} | {count/stats['total_articles']*100:.1f}% |"
            for score, count in sorted(stats["quality_distribution"].items())
            if count > 0
        )
    else:
        wikidata_pct = 99.4
        category_pct = 97.0
        unique_categories = 11549
        avg_chars = 2500
        avg_sentences = 30
        top_categories_table = """| đơn vị phân loại (taxon) | 618,281 | 55.3% |
| người (human) | 78,191 | 7.0% |
| xã của Pháp | 35,635 | 3.2% |
| khu định cư | 20,276 | 1.8% |
| tiểu hành tinh | 17,891 | 1.6% |
| xã của Việt Nam | 7,088 | 0.6% |"""
        quality_table = """| 1 | - | - |
| 5 | - | - |
| 10 | - | - |"""

    # Map the article count onto HuggingFace's size_categories taxonomy.
    # `total` is always an int here, so a plain numeric ladder suffices.
    if total < 1000:
        size_category = "n<1K"
    elif total < 10000:
        size_category = "1K<n<10K"
    elif total < 100000:
        size_category = "10K<n<100K"
    elif total < 1000000:
        size_category = "100K<n<1M"
    else:
        size_category = "1M<n<10M"

    generation_date = datetime.now().strftime("%Y-%m-%d")

    # YAML front-matter follows the Hub's dataset-card metadata layout
    # (dataset_info.features / dataset_info.splits nested, configs top-level).
    card_content = f'''---
language:
- vi
license: cc-by-sa-4.0
task_categories:
- text-generation
- fill-mask
- text-classification
- feature-extraction
- sentence-similarity
tags:
- wikipedia
- vietnamese
- nlp
- underthesea
- wikidata
- pretraining
- language-modeling
pretty_name: UVW 2026 - Vietnamese Wikipedia Dataset
size_categories:
- {size_category}
source_datasets:
- original
dataset_info:
  features:
  - name: id
    dtype: string
  - name: title
    dtype: string
  - name: content
    dtype: string
  - name: num_chars
    dtype: int32
  - name: num_sentences
    dtype: int32
  - name: quality_score
    dtype: int32
  - name: wikidata_id
    dtype: string
  - name: main_category
    dtype: string
  splits:
  - name: train
    num_examples: {train_count}
  - name: validation
    num_examples: {val_count}
  - name: test
    num_examples: {test_count}
configs:
- config_name: default
  data_files:
  - split: train
    path: train.parquet
  - split: validation
    path: validation.parquet
  - split: test
    path: test.parquet
---

# UVW 2026: Underthesea Vietnamese Wikipedia Dataset

<div align="center">

[![License: CC BY-SA 4.0](https://img.shields.io/badge/License-CC%20BY--SA%204.0-lightgrey.svg)](https://creativecommons.org/licenses/by-sa/4.0/)
[![Language: Vietnamese](https://img.shields.io/badge/Language-Vietnamese-blue.svg)](https://vi.wikipedia.org)
[![Wikidata Enriched](https://img.shields.io/badge/Wikidata-Enriched-green.svg)](https://www.wikidata.org)

</div>

## Dataset Description

**UVW 2026** (Underthesea Vietnamese Wikipedia) is a high-quality, cleaned dataset of Vietnamese Wikipedia articles enriched with Wikidata metadata. Designed for Vietnamese NLP research including language modeling, text generation, text classification, named entity recognition, and model pretraining.

### Key Features

- **Clean text**: Wikipedia markup, templates, references, and formatting removed
- **Wikidata integration**: Articles linked to Wikidata entities with semantic categories
- **Quality scoring**: Each article scored 1-10 based on content quality metrics
- **Unicode normalized**: NFC normalization applied for consistent text processing
- **Ready to use**: Pre-split into train/validation/test sets

### Dataset Summary

| Property | Value |
|----------|-------|
| **Language** | Vietnamese (vi) |
| **Source** | Vietnamese Wikipedia + Wikidata |
| **License** | CC BY-SA 4.0 |
| **Generated** | {generation_date} |
| **Total Articles** | {total:,} |
| **Wikidata Coverage** | {wikidata_pct:.1f}% |
| **Category Coverage** | {category_pct:.1f}% |
| **Unique Categories** | {unique_categories:,} |
| **Avg. Characters** | {avg_chars:,} |
| **Avg. Sentences** | {avg_sentences:,} |

## Quick Start

```python
from datasets import load_dataset

# Load the dataset
dataset = load_dataset("undertheseanlp/UVW-2026")

# Access splits
train = dataset["train"]
validation = dataset["validation"]
test = dataset["test"]

# View an example
print(train[0])
```

## Dataset Structure

### Data Splits

| Split | Examples | Description |
|-------|----------|-------------|
| `train` | {train_count:,} | Training set (80%) |
| `validation` | {val_count:,} | Validation set (10%) |
| `test` | {test_count:,} | Test set (10%) |

### Schema

```json
{{
    "id": "Việt_Nam",
    "title": "Việt Nam",
    "content": "Việt Nam, tên chính thức là Cộng hòa Xã hội chủ nghĩa Việt Nam...",
    "num_chars": 45000,
    "num_sentences": 500,
    "quality_score": 9,
    "wikidata_id": "Q881",
    "main_category": "quốc gia có chủ quyền"
}}
```

### Field Descriptions

| Field | Type | Description |
|-------|------|-------------|
| `id` | string | Unique article identifier (URL-safe title) |
| `title` | string | Human-readable article title |
| `content` | string | Cleaned article text content |
| `num_chars` | int32 | Character count of content |
| `num_sentences` | int32 | Estimated sentence count |
| `quality_score` | int32 | Quality score from 1 (lowest) to 10 (highest) |
| `wikidata_id` | string | Wikidata Q-identifier (e.g., "Q881" for Vietnam) |
| `main_category` | string | Primary category from Wikidata P31 (instance of) |

## Usage Examples

### Filter High-Quality Articles

```python
# Get articles with quality score >= 7
high_quality = dataset["train"].filter(lambda x: x["quality_score"] >= 7)
print(f"High-quality articles: {{len(high_quality):,}}")
```

### Filter by Category

```python
# Get articles about people
people = dataset["train"].filter(lambda x: x["main_category"] == "người")
print(f"Articles about people: {{len(people):,}}")

# Get articles about locations
locations = dataset["train"].filter(
    lambda x: "khu định cư" in (x["main_category"] or "")
)
```

### Filter by Wikidata

```python
# Get articles with Wikidata links
with_wikidata = dataset["train"].filter(lambda x: x["wikidata_id"] != "")

# Lookup specific entity
vietnam = dataset["train"].filter(lambda x: x["wikidata_id"] == "Q881")
```

### Use for Language Modeling

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base")

def tokenize(examples):
    return tokenizer(examples["content"], truncation=True, max_length=512)

tokenized = dataset["train"].map(tokenize, batched=True)
```

## Quality Score

Articles are scored 1-10 based on multiple factors:

| Component | Weight | Criteria |
|-----------|--------|----------|
| **Length** | 40% | Character count (200 - 100,000 optimal) |
| **Sentences** | 30% | Sentence count (3 - 1,000 optimal) |
| **Density** | 30% | Avg sentence length (80-150 chars optimal) |
| **Wikidata bonus** | +0.5 | Has wikidata_id |
| **Category bonus** | +0.5 | Has main_category |
| **Markup penalty** | -1 to -3 | Remaining Wikipedia markup |

### Quality Distribution

| Score | Count | Percentage |
|-------|------:|----------:|
{quality_table}

## Top Categories

| Category (Vietnamese) | Count | Percentage |
|----------------------|------:|----------:|
{top_categories_table}

## Data Processing

### Pipeline Steps

1. **Download**: Fetch Vietnamese Wikipedia XML dump from Wikimedia
2. **Extract**: Parse XML and extract article content
3. **Clean**: Remove Wikipedia markup (templates, refs, links, tables, categories)
4. **Normalize**: Apply Unicode NFC normalization
5. **Score**: Calculate quality metrics for each article
6. **Enrich**: Add Wikidata IDs and semantic categories via Wikidata API
7. **Filter**: Remove special pages, redirects, disambiguation, and short articles (<100 chars)
8. **Split**: Create train/validation/test splits (80/10/10) with seed=42

### Removed Content

- Wikipedia templates (`{{{{...}}}}`)
- References and citations (`<ref>...</ref>`)
- HTML tags and comments
- Category links (`[[Thể loại:...]]`)
- File/image links (`[[Tập tin:...]]`, `[[File:...]]`)
- Interwiki links
- Tables (`{{| ... |}}`)
- Infoboxes and navigation templates

### Reproduction

```bash
git clone https://github.com/undertheseanlp/UVW-2026
cd UVW-2026
uv sync --extra huggingface

# Run full pipeline
uv run python scripts/build_dataset.py

# Or run individual steps
uv run python scripts/download_wikipedia.py
uv run python scripts/extract_articles.py
uv run python scripts/wikipedia_quality_score.py
uv run python scripts/add_wikidata.py
uv run python scripts/create_splits.py
uv run python scripts/prepare_huggingface.py --push
```

## Citation

```bibtex
@dataset{{uvw2026,
  title = {{UVW 2026: Underthesea Vietnamese Wikipedia Dataset}},
  author = {{Underthesea NLP}},
  year = {{2026}},
  publisher = {{Hugging Face}},
  url = {{https://huggingface.co/datasets/undertheseanlp/UVW-2026}},
  note = {{Vietnamese Wikipedia articles enriched with Wikidata metadata}}
}}
```

## Related Resources

- [Underthesea](https://github.com/undertheseanlp/underthesea) - Vietnamese NLP Toolkit
- [PhoBERT](https://github.com/VinAIResearch/PhoBERT) - Pre-trained language models for Vietnamese
- [Vietnamese Wikipedia](https://vi.wikipedia.org)
- [Wikidata](https://www.wikidata.org)

## License

This dataset is released under the [Creative Commons Attribution-ShareAlike 4.0 International License (CC BY-SA 4.0)](https://creativecommons.org/licenses/by-sa/4.0/), consistent with the Wikipedia content license.

---

<div align="center">
Made with ❤️ by <a href="https://github.com/undertheseanlp">Underthesea NLP</a>
</div>
'''

    HF_DIR.mkdir(parents=True, exist_ok=True)

    # Save to the main HF directory.
    readme_path = HF_DIR / "README.md"
    with open(readme_path, "w", encoding="utf-8") as f:
        f.write(card_content)
    print(f" Dataset card saved to: {readme_path}")

    # Also drop a copy next to the parquet files so it ships with the upload.
    parquet_readme = HF_DIR / "uvw_2026_parquet" / "README.md"
    if parquet_readme.parent.exists():
        with open(parquet_readme, "w", encoding="utf-8") as f:
            f.write(card_content)
        print(f" Dataset card saved to: {parquet_readme}")
def push_to_hub(repo_id: str, private: bool = False):
    """Upload the parquet export of the dataset to the HuggingFace Hub.

    Args:
        repo_id: Target dataset repository, e.g. "undertheseanlp/UVW-2026".
        private: Create the repository as private when it does not exist yet.

    Returns:
        True on success, False on any failure (missing library, missing
        parquet files, not authenticated, or an upload error).
    """
    if not HF_HUB_AVAILABLE:
        print("Error: huggingface-hub library not installed")
        print("Install with: uv sync --extra huggingface")
        return False

    parquet_dir = HF_DIR / "uvw_2026_parquet"
    if not parquet_dir.exists():
        print(f"Error: Parquet directory not found: {parquet_dir}")
        print("Please run without --push first to generate the dataset.")
        return False

    print(f"\nPushing to HuggingFace Hub: {repo_id}")
    try:
        api = HfApi()

        # Fail fast when no auth token is configured locally.
        try:
            user_info = api.whoami()
            print(f" Authenticated as: {user_info['name']}")
        except Exception:
            print(" Not logged in. Please run: huggingface-cli login")
            return False

        # Idempotent: exist_ok lets re-runs update the same repository.
        print(f" Creating/updating repository...")
        api.create_repo(
            repo_id=repo_id,
            repo_type="dataset",
            private=private,
            exist_ok=True,
        )

        # One commit containing every file in the parquet directory
        # (parquet splits plus README.md when present).
        print(f" Uploading files from {parquet_dir}...")
        api.upload_folder(
            folder_path=str(parquet_dir),
            repo_id=repo_id,
            repo_type="dataset",
            commit_message="Update UVW 2026 dataset",
        )

        print(f"\n Successfully pushed to: https://huggingface.co/datasets/{repo_id}")
        return True
    except Exception as e:
        print(f"Error pushing to Hub: {e}")
        return False
def parse_args():
    """Parse command line arguments.

    Returns an argparse.Namespace with: push, repo_id, private,
    skip_generate, stats.
    """
    parser = argparse.ArgumentParser(
        description="Prepare UVW 2026 dataset for HuggingFace Hub",
        # RawDescriptionHelpFormatter preserves the epilog's line breaks.
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # NOTE(review): the epilog's original leading whitespace was lost in a
        # formatting-stripped paste; the text below is reconstructed flush-left.
        epilog="""
Examples:
# Generate dataset files only
python scripts/prepare_huggingface.py
# Generate and push to Hub
python scripts/prepare_huggingface.py --push
# Push to a custom repository
python scripts/prepare_huggingface.py --push --repo-id myorg/my-dataset
# Push as private dataset
python scripts/prepare_huggingface.py --push --private
# Only push (skip regeneration)
python scripts/prepare_huggingface.py --push --skip-generate
""",
    )
    # Upload to the Hub after (or instead of, with --skip-generate) generating.
    parser.add_argument(
        "--push",
        action="store_true",
        help="Push dataset to HuggingFace Hub after generation",
    )
    # Target dataset repository on the Hub.
    parser.add_argument(
        "--repo-id",
        type=str,
        default="undertheseanlp/UVW-2026",
        help="HuggingFace repository ID (default: undertheseanlp/UVW-2026)",
    )
    # Only affects repository creation; existing repos keep their visibility.
    parser.add_argument(
        "--private",
        action="store_true",
        help="Create private repository on Hub",
    )
    # Reuse previously generated parquet files instead of rebuilding them.
    parser.add_argument(
        "--skip-generate",
        action="store_true",
        help="Skip dataset generation, only push existing files",
    )
    # Opt-in because the stats pass streams the full processed corpus.
    parser.add_argument(
        "--stats",
        action="store_true",
        help="Calculate detailed statistics for dataset card",
    )
    return parser.parse_args()
def main():
    """Prepare dataset for HuggingFace Hub.

    Flow: parse args -> (unless --skip-generate) build dataset + card ->
    (if --push) upload -> otherwise print manual upload instructions.
    """
    args = parse_args()
    print("=" * 60)
    print("UVW 2026 - HuggingFace Dataset Preparation")
    print("=" * 60)

    split_counts = None
    stats = None
    if not args.skip_generate:
        # Check dependencies before doing any work.
        if not HF_AVAILABLE:
            print("\nError: datasets library not installed")
            print("Install with: uv sync --extra huggingface")
            return
        # The split JSONL files are produced by create_splits.py.
        if not SPLITS_DIR.exists():
            print(f"\nError: Splits directory not found: {SPLITS_DIR}")
            print("Please run create_splits.py first.")
            return
        # Optional: full-corpus statistics for the dataset card (slow).
        if args.stats:
            print("\nCalculating dataset statistics...")
            stats = load_dataset_statistics()
        # Build and save the dataset (arrow + parquet).
        print("\nCreating HuggingFace dataset...")
        result = create_hf_dataset()
        if result:
            _, split_counts = result
        # Card uses real split counts when available, fallbacks otherwise.
        print("\nCreating dataset card...")
        create_dataset_card(split_counts, stats)
        print("\nDataset preparation complete!")

    # Push to Hub if requested; abort on failure.
    if args.push:
        success = push_to_hub(args.repo_id, private=args.private)
        if not success:
            return

    # Show manual next steps only when not pushing automatically.
    if not args.push:
        print("\n" + "-" * 60)
        print("Next steps:")
        print("-" * 60)
        print("\nTo upload to HuggingFace Hub, run:")
        print(f" python scripts/prepare_huggingface.py --push")
        print("\nOr upload manually:")
        print(" 1. huggingface-cli login")
        print(" 2. huggingface-cli upload undertheseanlp/UVW-2026 data/huggingface/uvw_2026_parquet")
    # NOTE(review): source indentation was lost; "Done!" is placed at function
    # level (printed on both push and no-push paths) — confirm against the
    # original file.
    print("\nDone!")
# Script entry point: run the full preparation flow when executed directly.
if __name__ == "__main__":
    main()