import glob
import os
from typing import Optional

import numpy as np
from datasets import (
    BuilderConfig,
    DatasetInfo,
    Features,
    GeneratorBasedBuilder,
    Sequence,
    SplitGenerator,
    Value,
)

# Each document is exactly DOCUMENT_SIZE tokens long. The longest trainable
# sequence is one token shorter, since next-token labels are shifted by one.
DOCUMENT_SIZE = 65536
MAX_SEQUENCE_LENGTH = DOCUMENT_SIZE - 1
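# With the default block_size of 1024, for example, one training example
# consumes 1025 consecutive tokens: tokens[0:1024] become input_ids and
# tokens[1:1025] become labels.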


class LongCrawl64Config(BuilderConfig):
    """BuilderConfig for LongCrawl64."""

    def __init__(
        self,
        name: str = "default",
        version: str = "1.0.0",
        description: Optional[str] = None,
        block_size: int = 1024,
        **kwargs,
    ):
        """BuilderConfig for LongCrawl64.

        Args:
            name: Short name of the dataset configuration.
            version: Version string.
            description: Description of the dataset configuration.
            block_size: Length of sequences to generate (default: 1024,
                max: 65535). Larger values are clamped to the maximum.
            **kwargs: Keyword arguments forwarded to the superclass.
        """
        description = description or "Default configuration for LongCrawl64 dataset"
        super().__init__(name=name, version=version, description=description, **kwargs)
        # Clamp to the longest sequence a single document can provide.
        self._block_size = min(block_size, MAX_SEQUENCE_LENGTH)

    @property
    def block_size(self) -> int:
        """Sequence length for this configuration."""
        return self._block_size
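

# Note: `load_dataset` forwards unrecognized keyword arguments to the selected
# BuilderConfig, so `block_size` can be overridden at load time without
# defining a new configuration; see the hypothetical `__main__` sketch at the
# bottom of this file.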


class LongCrawl64(GeneratorBasedBuilder):
    """LongCrawl64: A dataset for training language models with long context.

    This dataset implements the same memory-mapped random-access pattern as
    the original training script, but presents it through a standard
    HuggingFace dataset interface.

    The sequence length is configurable when loading the dataset
    (up to 65535 tokens).
    """

    VERSION = "1.0.0"

    BUILDER_CONFIGS = [
        LongCrawl64Config(
            name="default",
            description="Default configuration for LongCrawl64 dataset",
            block_size=1024,
        ),
    ]
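
    # Additional named configurations (other sequence lengths, subsets, and
    # so on) could be listed above, e.g. a hypothetical
    # LongCrawl64Config(name="long", block_size=16384).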

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        return DatasetInfo(
            description=f"""
            LongCrawl64: A dataset for training language models with long context.
            Each document is {DOCUMENT_SIZE} tokens long. This dataset uses memory
            mapping for efficient random access across the entire dataset.

            The sequence length is configurable when loading the dataset (up to
            {MAX_SEQUENCE_LENGTH} tokens).
            Current sequence length: {getattr(self.config, 'block_size', 1024)} tokens.
            """,
            features=Features({
                "input_ids": Sequence(Value("int32")),
                "labels": Sequence(Value("int32")),
            }),
        )

    def _split_generators(self, dl_manager):
        # The *.bin shards are expected to sit next to this script; resolve
        # "." to an absolute path for globbing.
        data_dir = dl_manager.download_and_extract(".")
        if isinstance(data_dir, str):
            data_dir = os.path.join(os.getcwd(), data_dir)
        else:
            data_dir = str(data_dir)

        train_pattern = os.path.join(data_dir, "train-*.bin")
        val_pattern = os.path.join(data_dir, "val-*.bin")

        return [
            SplitGenerator(
                name="train",
                gen_kwargs={"pattern": train_pattern},
            ),
            SplitGenerator(
                name="validation",
                gen_kwargs={"pattern": val_pattern},
            ),
        ]

    def _generate_examples(self, pattern):
        """Generate examples using memory mapping.

        This works like the `get_batch` function in the original training
        code, using memory mapping for efficient random access to the
        dataset. Examples are sampled with replacement, with each shard
        weighted by its size in tokens.
        """
        files = sorted(glob.glob(pattern))
        if not files:
            raise ValueError(f"No files found matching pattern {pattern}. Dataset cannot be empty.")

        # Shards store raw uint16 token ids, so each token occupies 2 bytes.
        file_sizes = []
        for file_path in files:
            size_bytes = os.path.getsize(file_path)
            size_tokens = size_bytes // 2
            file_sizes.append(size_tokens)

        total_tokens = sum(file_sizes)

        config_sequence_length = getattr(self.config, "block_size", 1024)

        def get_random_batch(idx):
            """Generate a single random example.

            Args:
                idx: Example index (unused; kept for API compatibility).
            """
            sequence_length = min(config_sequence_length, MAX_SEQUENCE_LENGTH)

            # Pick a shard with probability proportional to its token count.
            file_idx = np.random.choice(len(files), p=np.array(file_sizes) / total_tokens)
            file_path = files[file_idx]
            file_size = file_sizes[file_idx]

            # Highest valid start position: sequence_length + 1 consecutive
            # tokens are needed (the inputs plus the shifted labels).
            max_position = file_size - (sequence_length + 1)

            if max_position <= 0:
                print(f"Warning: File {file_path} is too small for sequence length {sequence_length}. Using a shorter sequence.")
                sequence_length = max(1, file_size - 1)
                max_position = 0

            # randint's upper bound is exclusive; add 1 so that max_position
            # itself is reachable and randint(0, 0) is avoided when
            # max_position is 0.
            pos_within_file = np.random.randint(0, max_position + 1)

            # Memory-map the shard so only the requested slice is paged in.
            data = np.memmap(file_path, dtype=np.uint16, mode="r")
            src = data[pos_within_file:pos_within_file + sequence_length + 1].astype(np.int32)

            # Next-token prediction: labels are the inputs shifted by one.
            input_ids = src[:-1]
            labels = src[1:]

            return {
                "input_ids": input_ids,
                "labels": labels,
            }

        # Yield a fixed, nominal number of randomly sampled examples; since
        # sampling is with replacement, this behaves like an effectively
        # unbounded stream rather than a true epoch.
        num_examples = 10_000_000
        for i in range(num_examples):
            yield i, get_random_batch(i)
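

if __name__ == "__main__":
    # A minimal smoke test, assuming the train-*.bin / val-*.bin shards are
    # present in the working directory and this file is run as a script;
    # block_size=2048 is illustrative. Streaming avoids materializing the
    # nominal 10M-example "epoch" on disk.
    from datasets import load_dataset

    ds = load_dataset(
        __file__,
        block_size=2048,
        split="train",
        streaming=True,
        trust_remote_code=True,
    )
    example = next(iter(ds))
    print(len(example["input_ids"]), len(example["labels"]))  # expected: 2048 2048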