# lm1b_builder.py — LM1B dataset preparation script.
# Provenance: uploaded to the Hugging Face Hub (repo path: lm1b / lm1b_builder.py)
# by FrankCCCCC via huggingface_hub; commit 21d4889 (verified).
import os
class LM1B:
    """Build the LM1B (One Billion Word Benchmark) dataset end to end.

    Pipeline: download via TensorFlow Datasets -> convert to HuggingFace
    ``datasets`` format -> attach metadata and a dataset card -> optionally
    upload to the HuggingFace Hub.  Heavy third-party imports are deferred
    to the methods that need them, so the class can be instantiated in an
    environment without TF/HF installed.
    """

    def __init__(self, root_path: str = "dataset/lm1b") -> None:
        """Record the dataset root and derive the per-format sub-paths.

        Args:
            root_path: Directory under which both dataset copies are kept.
        """
        self._root_path: str = root_path
        # TFDS-managed copy lives under <root>/tf, HF copy under <root>/hf.
        self._tf_path: str = os.path.join(root_path, "tf")
        self._hf_path: str = os.path.join(root_path, "hf")

    def download(self) -> None:
        """Download LM1B dataset from TensorFlow Datasets.

        Downloads the dataset and saves it under ${self._root_path}/tf
        Reference: https://www.tensorflow.org/datasets/catalog/lm1b
        """
        import tensorflow_datasets as tfds

        # Create the tf directory if it doesn't exist
        os.makedirs(self._tf_path, exist_ok=True)
        # download_and_prepare() fetches and builds the dataset on disk
        # without also constructing the unused tf.data pipeline that
        # tfds.load() would return.
        tfds.builder("lm1b", data_dir=self._tf_path).download_and_prepare()
        print(f"LM1B dataset downloaded to {self._tf_path}")

    def convert_to_hf(self) -> None:
        """Load downloaded LM1B with TFDS and convert it to HuggingFace Dataset format.

        Saves the HuggingFace version under ${self._root_path}/hf

        NOTE(review): each split is materialized as a Python list before
        `Dataset.from_list`, so peak memory grows with split size (the LM1B
        train split is ~30M sentences) — acceptable for short lines, but
        worth confirming on the target machine.
        """
        import tensorflow_datasets as tfds
        from datasets import Dataset, DatasetDict

        # Create the hf directory if it doesn't exist
        os.makedirs(self._hf_path, exist_ok=True)
        # Discover which splits were actually prepared on disk.
        ds_builder = tfds.builder("lm1b", data_dir=self._tf_path)
        splits = ds_builder.info.splits
        hf_datasets = {}
        for split_name in splits:
            print(f"Converting {split_name} split...")
            tf_dataset = tfds.load(
                "lm1b",
                split=split_name,
                data_dir=self._tf_path,
                download=False,  # data must already exist; see download()
            )
            # LM1B examples carry a single 'text' feature (tf.string bytes);
            # decode each to str for the HF dataset.
            texts = [
                {"text": example["text"].numpy().decode("utf-8")}
                for example in tf_dataset
            ]
            hf_datasets[split_name] = Dataset.from_list(texts)
            print(f" Converted {len(texts)} examples")
        # Create DatasetDict and save
        dataset_dict = DatasetDict(hf_datasets)
        dataset_dict.save_to_disk(self._hf_path)
        print(f"HuggingFace dataset saved to {self._hf_path}")

    def add_meta_data(self) -> None:
        """Add detailed metadata to the converted dataset.

        Writes ``metadata.json`` and a README dataset card next to the
        converted dataset under ${self._root_path}/hf.
        """
        from datasets import load_from_disk
        import json

        # Load the HF dataset (only to read split sizes for the card).
        dataset = load_from_disk(self._hf_path)
        metadata = {
            "name": "lm1b",
            "description": "One Billion Word Benchmark for Language Modeling. "
            "A benchmark corpus for measuring progress in statistical language modeling.",
            "citation": """@inproceedings{chelba2013one,
title={One billion word benchmark for measuring progress in statistical language modeling},
author={Chelba, Ciprian and Mikolov, Tomas and Schuster, Mike and Ge, Qi and Brants, Thorsten and Koehn, Phillipp and Robinson, Tony},
booktitle={Interspeech},
year={2014}
}""",
            "homepage": "https://www.statmt.org/lm-benchmark/",
            "license": "Apache-2.0",
            "features": {
                "text": "string - The text content of each example"
            },
            "splits": {k: {"num_examples": len(v)} for k, v in dataset.items()},
            "task_categories": ["text-generation", "fill-mask"],
            "language": ["en"],
            "size_category": "1B<n<10B",
        }
        # Save metadata; explicit encoding so the write is not dependent on
        # the platform's locale default (e.g. cp1252 on Windows).
        metadata_path = os.path.join(self._hf_path, "metadata.json")
        with open(metadata_path, "w", encoding="utf-8") as f:
            json.dump(metadata, f, indent=2)
        # Create a README dataset card (YAML front matter + description).
        readme_content = f"""---
license: apache-2.0
task_categories:
- text-generation
- fill-mask
language:
- en
size_categories:
- 1B<n<10B
---
# LM1B - One Billion Word Benchmark
## Dataset Description
The One Billion Word Benchmark is a large language modeling dataset.
It contains approximately one billion words of training data derived from news articles.
## How was this dataset built?
We download the full LM1B dataset from TensorFlow Datasets (TFDS) and convert it to HuggingFace format automatically. The full script is in `lm1b.py`. The required environment is:
- tensorflow==2.20.0
- tensorflow-datasets==4.9.9
- huggingface_hub==1.3.3
- datasets==4.4.1
```bash
pip install tensorflow==2.20.0 tensorflow-datasets==4.9.9 huggingface_hub==1.3.3 datasets==4.4.1
python lm1b_builder.py --action all
```
## Dataset Structure
### Data Fields
- `text`: A string containing the text content
### Data Splits
| Split | Examples |
|-------|----------|
"""
        for split_name, split_data in dataset.items():
            readme_content += f"| {split_name} | {len(split_data):,} |\n"
        readme_content += """
## Citation
```bibtex
@inproceedings{chelba2013one,
title={One billion word benchmark for measuring progress in statistical language modeling},
author={Chelba, Ciprian and Mikolov, Tomas and Schuster, Mike and Ge, Qi and Brants, Thorsten and Koehn, Phillipp and Robinson, Tony},
booktitle={Interspeech},
year={2014}
}
```
## License
Apache 2.0
"""
        readme_path = os.path.join(self._hf_path, "README.md")
        with open(readme_path, "w", encoding="utf-8") as f:
            f.write(readme_content)
        print(f"Metadata saved to {metadata_path}")
        print(f"README saved to {readme_path}")

    def upload_to_hf(self, user: str, name: str) -> None:
        """Upload the converted dataset to HuggingFace Hub.

        Args:
            user: HuggingFace username or organization
            name: Repository name for the dataset
        """
        from datasets import load_from_disk
        from huggingface_hub import HfApi

        dataset = load_from_disk(self._hf_path)
        repo_id = f"{user}/{name}"
        # Push the dataset shards themselves.
        dataset.push_to_hub(
            repo_id,
            private=False,
        )
        # Upload additional files (README.md, metadata.json) if present.
        api = HfApi()
        readme_path = os.path.join(self._hf_path, "README.md")
        if os.path.exists(readme_path):
            api.upload_file(
                path_or_fileobj=readme_path,
                path_in_repo="README.md",
                repo_id=repo_id,
                repo_type="dataset",
            )
        metadata_path = os.path.join(self._hf_path, "metadata.json")
        if os.path.exists(metadata_path):
            api.upload_file(
                path_or_fileobj=metadata_path,
                path_in_repo="metadata.json",
                repo_id=repo_id,
                repo_type="dataset",
            )
        # Upload this script itself for reproducibility
        script_path = os.path.abspath(__file__)
        if os.path.exists(script_path):
            api.upload_file(
                path_or_fileobj=script_path,
                path_in_repo="lm1b_builder.py",
                repo_id=repo_id,
                repo_type="dataset",
            )
        print(f"Dataset uploaded to https://huggingface.co/datasets/{repo_id}")

    def test(self) -> bool:
        """Test if the converted HuggingFace dataset can be loaded correctly.

        Returns:
            True when the dataset loads and every split can be inspected.
        """
        from datasets import load_from_disk

        print("Testing local HuggingFace dataset...")
        dataset = load_from_disk(self._hf_path)
        # Plain strings here: the originals were f-strings with no fields.
        print("Dataset loaded successfully!")
        print(f"Available splits: {list(dataset.keys())}")
        for split_name, split_data in dataset.items():
            print(f"\n{split_name} split:")
            print(f" Number of examples: {len(split_data):,}")
            print(f" Features: {split_data.features}")
            # Show a sample (truncated to 100 chars for readability).
            if len(split_data) > 0:
                sample = split_data[0]
                text_preview = sample["text"][:100] + "..." if len(sample["text"]) > 100 else sample["text"]
                print(f" Sample text: {text_preview}")
        print("\nAll tests passed!")
        return True

    def download_convert_metadata_test(self) -> None:
        """Unified method that runs the full pipeline: download, convert, add metadata, and test."""
        print("=== Step 1: Download ===")
        self.download()
        print("\n=== Step 2: Convert to HuggingFace ===")
        self.convert_to_hf()
        print("\n=== Step 3: Add Metadata ===")
        self.add_meta_data()
        print("\n=== Step 4: Test ===")
        self.test()
if __name__ == "__main__":
    import argparse

    # Command-line front end: one --action per pipeline step, plus "all".
    cli = argparse.ArgumentParser(description="LM1B Dataset Preparation")
    cli.add_argument("--root_path", type=str, default="dataset/lm1b", help="Root path for the dataset")
    cli.add_argument("--action", type=str, required=True,
                     choices=["download", "convert", "metadata", "upload", "test", "all"],
                     help="Action to perform")
    cli.add_argument("--user", type=str, help="HuggingFace username (for upload)")
    cli.add_argument("--name", type=str, help="Dataset name on HuggingFace (for upload)")
    opts = cli.parse_args()

    builder = LM1B(root_path=opts.root_path)

    if opts.action == "upload":
        # Upload needs both repo coordinates before anything else runs.
        if not (opts.user and opts.name):
            raise ValueError("--user and --name are required for upload")
        builder.upload_to_hf(opts.user, opts.name)
    elif opts.action == "all":
        builder.download_convert_metadata_test()
        print("\nTo upload, run with --action upload --user <user> --name <name>")
    else:
        # Remaining single-step actions map one-to-one onto builder methods.
        step = {
            "download": builder.download,
            "convert": builder.convert_to_hf,
            "metadata": builder.add_meta_data,
            "test": builder.test,
        }
        step[opts.action]()