# longcrawl64test / longcrawl_dataset.py
import os
import glob
import numpy as np
from typing import Optional
from datasets import BuilderConfig, GeneratorBasedBuilder, DatasetInfo, Features, Value, Sequence, SplitGenerator
from dataclasses import dataclass
# Constants
DOCUMENT_SIZE = 65536 # Size of each document in tokens
MAX_SEQUENCE_LENGTH = DOCUMENT_SIZE - 1 # Max sequence length (-1 for labels shift)
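# Descriptive note: tokens are stored on disk as uint16, so one 65,536-token
# document occupies 65,536 * 2 = 131,072 bytes (128 KiB), and a file's token
# count is recovered below as os.path.getsize(path) // 2. MAX_SEQUENCE_LENGTH
# is one less than DOCUMENT_SIZE so that a sequence plus its one-token label
# shift still fits within a single document.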
@dataclass
class LongCrawl64Config(BuilderConfig):
"""BuilderConfig for LongCrawl64."""
def __init__(
self,
name: str = "default",
version: str = "1.0.0",
description: Optional[str] = None,
block_size: int = 1024,
**kwargs,
):
"""BuilderConfig for LongCrawl64.
Args:
name: Short name of the dataset configuration
version: Version string
description: Description of the dataset configuration
block_size: Length of sequences to generate (default: 1024, max: 65535)
**kwargs: Keyword arguments forwarded to super.
"""
description = description or "Default configuration for LongCrawl64 dataset"
super().__init__(name=name, version=version, description=description, **kwargs)
self._block_size = min(block_size, MAX_SEQUENCE_LENGTH)
@property
def block_size(self) -> int:
"""Get the sequence length for this configuration."""
return self._block_size
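# Illustrative note (not executed; names refer to the classes defined in this
# file): block_size is clamped so an input/label pair always fits in one
# 65,536-token document.
#
#     LongCrawl64Config(block_size=2048).block_size       # -> 2048
#     LongCrawl64Config(block_size=1_000_000).block_size  # -> 65535 (clamped)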
class LongCrawl64(GeneratorBasedBuilder):
"""LongCrawl64: A dataset for training language models with long context.
This dataset implements the same memory-mapped random access pattern as the original
training script, but presents it through a user-friendly HuggingFace dataset interface.
The sequence length is configurable when loading the dataset (up to 65535 tokens).
"""
VERSION = "1.0.0"
BUILDER_CONFIGS = [
LongCrawl64Config(
name="default",
description="Default configuration for LongCrawl64 dataset",
block_size=1024,
),
]
DEFAULT_CONFIG_NAME = "default"
def _info(self):
return DatasetInfo(
description=f"""
LongCrawl64: A dataset for training language models with long context.
Each document is {DOCUMENT_SIZE} tokens long. This dataset uses memory mapping for
efficient random access across the entire dataset.
The sequence length is configurable when loading the dataset (up to {MAX_SEQUENCE_LENGTH} tokens).
Current sequence length: {getattr(self.config, 'block_size', 1024)} tokens.
""",
features=Features({
# Dynamic sequence length - will be determined at load time
"input_ids": Sequence(Value("int32")),
"labels": Sequence(Value("int32")),
}),
)
def _split_generators(self, dl_manager):
        # Handle the path types returned by different versions of the datasets library
data_dir = dl_manager.download_and_extract(".")
if isinstance(data_dir, str):
data_dir = os.path.join(os.getcwd(), data_dir)
else:
data_dir = str(data_dir) # Convert to string to ensure it works with os.path.join
# Find all train and validation files
train_pattern = os.path.join(data_dir, "train-*.bin")
val_pattern = os.path.join(data_dir, "val-*.bin")
return [
SplitGenerator(
name="train", # Using string instead of Split.TRAIN
gen_kwargs={
"pattern": train_pattern,
},
),
SplitGenerator(
name="validation", # Using string instead of Split.VALIDATION
gen_kwargs={
"pattern": val_pattern,
},
),
]
def _generate_examples(self, pattern):
"""Generate examples using memory mapping.
        This works like the original get_batch function in the training code,
using memory mapping for efficient random access to the dataset.
"""
# Find all files that match the pattern
files = sorted(glob.glob(pattern))
if not files:
raise ValueError(f"No files found matching pattern {pattern}. Dataset cannot be empty.")
# For each file, determine its size
file_sizes = []
for file_path in files:
# Get file size in tokens (each token is 2 bytes)
size_bytes = os.path.getsize(file_path)
size_tokens = size_bytes // 2 # Each token is 2 bytes (uint16)
file_sizes.append(size_tokens)
# Calculate total tokens for weighted selection
total_tokens = sum(file_sizes)
# Capture config in closure
        config_sequence_length = getattr(self.config, 'block_size', 1024)
        def get_random_batch(idx):
            """Generate one random example.
            Args:
                idx: Example index (unused, kept for API compatibility)
            """
# Ensure sequence length doesn't exceed document size
sequence_length = min(config_sequence_length, MAX_SEQUENCE_LENGTH)
# Select a random file based on file sizes (weighted by size)
file_idx = np.random.choice(len(files), p=np.array(file_sizes) / total_tokens)
file_path = files[file_idx]
file_size = file_sizes[file_idx]
            # Ensure we can fit the sequence within this file
            max_position = file_size - (sequence_length + 1)  # +1 for labels offset
            # If the file is too small, fall back to the longest sequence it can hold
            if max_position <= 0:
                print(f"Warning: File {file_path} is too small for sequence length {sequence_length}. Using a shorter sequence.")
                sequence_length = max(1, file_size - 1)
                max_position = 0
            # Sample a random start position within the file (np.random.randint requires
            # high > low, so fall back to position 0 when there is no slack)
            pos_within_file = np.random.randint(0, max_position) if max_position > 0 else 0
# Load the file using memmap
data = np.memmap(file_path, dtype=np.uint16, mode='r')
# Extract the sequence - guaranteed not to cross file boundaries
src = data[pos_within_file:pos_within_file + sequence_length + 1].astype(np.int32)
# Split into input and target (labels shifted by one)
input_ids = src[:-1]
labels = src[1:]
return {
"input_ids": input_ids,
"labels": labels,
}
        # The HuggingFace dataset API requires examples to be yielded as (key, example) pairs
        # We yield a large "virtual" dataset of randomly sampled sequences
        for i in range(10000000):  # Very large number to make the dataset effectively unbounded
            # Each example uses the block_size configured when the dataset was loaded
yield i, get_random_batch(i)
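# ---------------------------------------------------------------------------
# Usage sketch (assumption, not part of the loading script itself): when this
# file is used as a Hugging Face loading script, load_dataset forwards extra
# keyword arguments such as block_size to LongCrawl64Config. The repository id
# below is a placeholder.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datasets import load_dataset
    # Stream the training split so the large "virtual" dataset is not
    # materialized on disk; block_size controls the yielded sequence length.
    ds = load_dataset(
        "path/to/longcrawl64test",  # placeholder: repo id or local path to this script's directory
        name="default",
        block_size=2048,
        split="train",
        streaming=True,
        trust_remote_code=True,
    )
    first = next(iter(ds))
    print(len(first["input_ids"]), len(first["labels"]))  # expected: 2048 2048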