import os


class LM1B:
    """Download, convert, and publish the LM1B (One Billion Word) dataset.

    Pipeline layout under ``root_path``:
      - ``tf/`` : raw TensorFlow Datasets download
      - ``hf/`` : converted HuggingFace ``DatasetDict`` (plus metadata.json)
    """

    def __init__(self, root_path: str = "dataset/lm1b"):
        # Root directory for all dataset artifacts.
        self._root_path: str = root_path
        # TFDS download target.
        self._tf_path: str = os.path.join(root_path, "tf")
        # HuggingFace-format output target.
        self._hf_path: str = os.path.join(root_path, "hf")

    def download(self):
        """Download LM1B dataset from TensorFlow Datasets.

        Downloads the dataset and saves it under ``${self._root_path}/tf``.

        Reference: https://www.tensorflow.org/datasets/catalog/lm1b
        """
        # Lazy import: tensorflow_datasets is heavy and only needed here.
        import tensorflow_datasets as tfds

        # Create the tf directory if it doesn't exist.
        os.makedirs(self._tf_path, exist_ok=True)

        # Download LM1B dataset using TFDS.
        # This will download and prepare the dataset.
        tfds.load(
            "lm1b",
            data_dir=self._tf_path,
            download=True,
        )

        print(f"LM1B dataset downloaded to {self._tf_path}")

    def convert_to_hf(self):
        """Load downloaded LM1B with TFDS and convert it to HuggingFace Dataset format.

        Saves the HuggingFace version under ``${self._root_path}/hf``.
        """
        import tensorflow_datasets as tfds
        from datasets import Dataset, DatasetDict

        # Create the hf directory if it doesn't exist.
        os.makedirs(self._hf_path, exist_ok=True)

        # Builder gives access to split names without loading any data.
        ds_builder = tfds.builder("lm1b", data_dir=self._tf_path)
        splits = ds_builder.info.splits

        hf_datasets = {}
        for split_name in splits:
            print(f"Converting {split_name} split...")

            # Load the already-downloaded TF dataset (download=False).
            tf_dataset = tfds.load(
                "lm1b",
                split=split_name,
                data_dir=self._tf_path,
                download=False,
            )

            # Convert each TF example to a plain dict; LM1B has a 'text' field.
            texts = []
            for example in tf_dataset:
                text = example["text"].numpy().decode("utf-8")
                texts.append({"text": text})

            # Create HuggingFace Dataset for this split.
            hf_datasets[split_name] = Dataset.from_list(texts)
            print(f"  Converted {len(texts)} examples")

        # Create DatasetDict and save all splits together.
        dataset_dict = DatasetDict(hf_datasets)
        dataset_dict.save_to_disk(self._hf_path)
        print(f"HuggingFace dataset saved to {self._hf_path}")

    def add_meta_data(self):
        """Add detailed metadata to the converted dataset.

        Specifies entry field, split information, and dataset card.
        Writes ``metadata.json`` next to the saved HuggingFace dataset.
        """
        from datasets import load_from_disk
        import json

        # Load the HF dataset so split sizes can be recorded.
        dataset = load_from_disk(self._hf_path)

        metadata = {
            "name": "lm1b",
            "description": "One Billion Word Benchmark for Language Modeling. "
            "A benchmark corpus for measuring progress in statistical language modeling.",
            "citation": """@inproceedings{chelba2013one,
  title={One billion word benchmark for measuring progress in statistical language modeling},
  author={Chelba, Ciprian and Mikolov, Tomas and Schuster, Mike and Ge, Qi and Brants, Thorsten and Koehn, Phillipp and Robinson, Tony},
  booktitle={Interspeech},
  year={2014}
}""",
            "homepage": "https://www.statmt.org/lm-benchmark/",
            "license": "Apache-2.0",
            "features": {
                "text": "string - The text content of each example"
            },
            "splits": {k: {"num_examples": len(v)} for k, v in dataset.items()},
            "task_categories": ["text-generation", "fill-mask"],
            "language": ["en"],
            # NOTE(review): source was garbled here ("1B 0:"); "1B<n<10B" is the
            # standard HF size-category bucket for this corpus — confirm intent.
            "size_category": "1B<n<10B",
        }

        # NOTE(review): the save step below was lost in the garbled source;
        # reconstructed as a JSON dump beside the dataset — verify against original.
        metadata_path = os.path.join(self._hf_path, "metadata.json")
        with open(metadata_path, "w", encoding="utf-8") as f:
            json.dump(metadata, f, indent=2, ensure_ascii=False)
        print(f"Metadata saved to {metadata_path}")

    def upload_to_hf(self, user: str, name: str):
        """Upload the converted dataset to the HuggingFace Hub as ``user/name``.

        NOTE(review): this method's body was lost in the garbled source and is
        reconstructed from the CLI call site ``lm1b.upload_to_hf(args.user,
        args.name)`` — verify against the original implementation.
        """
        from datasets import load_from_disk

        dataset = load_from_disk(self._hf_path)
        repo_id = f"{user}/{name}"
        dataset.push_to_hub(repo_id)
        print(f"Dataset uploaded to https://huggingface.co/datasets/{repo_id}")

    def test(self):
        """Sanity-check the converted dataset: load it, report split sizes,
        and print a short sample from each split.

        Returns True on success.

        NOTE(review): the head of this method was lost in the garbled source;
        the loop/preview logic is reconstructed around the surviving tail
        (``... if len(sample["text"]) > 100 else sample["text"]``) — verify.
        """
        from datasets import load_from_disk

        dataset = load_from_disk(self._hf_path)

        for split_name, split_data in dataset.items():
            print(f"Split: {split_name}, num_examples: {len(split_data)}")
            if len(split_data) > 0:
                sample = split_data[0]
                # Truncate long samples so the console output stays readable.
                text_preview = (
                    sample["text"][:100] + "..."
                    if len(sample["text"]) > 100
                    else sample["text"]
                )
                print(f"  Sample text: {text_preview}")

        print("\nAll tests passed!")
        return True

    def download_convert_metadata_test(self):
        """Unified method that runs the full pipeline: download, convert, add metadata, and test."""
        print("=== Step 1: Download ===")
        self.download()
        print("\n=== Step 2: Convert to HuggingFace ===")
        self.convert_to_hf()
        print("\n=== Step 3: Add Metadata ===")
        self.add_meta_data()
        print("\n=== Step 4: Test ===")
        self.test()


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="LM1B Dataset Preparation")
    parser.add_argument("--root_path", type=str, default="dataset/lm1b",
                        help="Root path for the dataset")
    parser.add_argument("--action", type=str, required=True,
                        choices=["download", "convert", "metadata", "upload", "test", "all"],
                        help="Action to perform")
    parser.add_argument("--user", type=str,
                        help="HuggingFace username (for upload)")
    parser.add_argument("--name", type=str,
                        help="Dataset name on HuggingFace (for upload)")

    args = parser.parse_args()
    lm1b = LM1B(root_path=args.root_path)

    if args.action == "download":
        lm1b.download()
    elif args.action == "convert":
        lm1b.convert_to_hf()
    elif args.action == "metadata":
        lm1b.add_meta_data()
    elif args.action == "upload":
        if not args.user or not args.name:
            raise ValueError("--user and --name are required for upload")
        lm1b.upload_to_hf(args.user, args.name)
    elif args.action == "test":
        lm1b.test()
    elif args.action == "all":
        lm1b.download_convert_metadata_test()
        # NOTE(review): placeholders below were stripped in the garbled source;
        # restored as <username>/<dataset_name>.
        print("\nTo upload, run with --action upload --user <username> --name <dataset_name>")