#!/usr/bin/env python3
"""
划分 Pile 数据集为 train/val/test
与 BabyLM 保持相同的命名格式
"""
import numpy as np
from pathlib import Path
import json
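
# Expected input: a 2-D token-ID array of shape (num_samples, sequence_length).
# Both dimensions and the dtype are read from the file itself rather than assumed.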
def split_pile_data(
input_file="batch_0_to_1000.npy",
output_dir="tokenized",
train_ratio=0.8,
val_ratio=0.1,
seed=42
):
"""划分 Pile 数据"""
print("="*70)
print("📊 Splitting Pile Dataset")
print("="*70)
print(f"Input: {input_file}")
print(f"Output: {output_dir}/")
print(f"Split: train={train_ratio:.0%}, val={val_ratio:.0%}, test={1-train_ratio-val_ratio:.0%}")
print(f"Seed: {seed}")
print("="*70)
print()
    # Load the tokenized data
print("📥 Loading data...")
data = np.load(input_file, allow_pickle=False)
print(f" Shape: {data.shape}")
print(f" Dtype: {data.dtype}")
print(f" Total samples: {len(data):,}")
print(f" Sequence length: {data.shape[1]}")
print()
    # Seed NumPy's global RNG so the shuffle (and thus the split) is reproducible
np.random.seed(seed)
    # Shuffle the sample indices
print("🔀 Shuffling indices...")
indices = np.arange(len(data))
np.random.shuffle(indices)
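    # Slicing the shuffled index array below yields three disjoint, randomly
    # sampled subsets; the data itself is not copied at this point.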
    # Compute split sizes
n_total = len(data)
n_train = int(n_total * train_ratio)
n_val = int(n_total * val_ratio)
    # Slice the shuffled indices into train/val/test
train_indices = indices[:n_train]
val_indices = indices[n_train:n_train + n_val]
test_indices = indices[n_train + n_val:]
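    # int() floors both counts, so any rounding remainder lands in the test
    # split and every sample is used exactly once.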
print("✂️ Splitting...")
print(f" Train: {len(train_indices):,} samples ({len(train_indices)/n_total*100:.1f}%)")
print(f" Val: {len(val_indices):,} samples ({len(val_indices)/n_total*100:.1f}%)")
print(f" Test: {len(test_indices):,} samples ({len(test_indices)/n_total*100:.1f}%)")
print()
    # Create the output directory
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
print("💾 Saving splits...")
# Train
print(f" → train.npy...")
np.save(output_dir / "train.npy", data[train_indices])
size_mb = (output_dir / "train.npy").stat().st_size / (1024**2)
print(f" ✅ {size_mb:.1f} MB")
# Val (eval)
print(f" → eval.npy...")
np.save(output_dir / "eval.npy", data[val_indices])
size_mb = (output_dir / "eval.npy").stat().st_size / (1024**2)
print(f" ✅ {size_mb:.1f} MB")
# Test
print(f" → test.npy...")
np.save(output_dir / "test.npy", data[test_indices])
size_mb = (output_dir / "test.npy").stat().st_size / (1024**2)
print(f" ✅ {size_mb:.1f} MB")
    # Write metadata.json (same format as BabyLM)
    print(" → metadata.json...")
metadata = {
"vocab_size": int(data.max()) + 1, # 最大 token ID + 1
"sequence_length": int(data.shape[1]),
"num_train": int(len(train_indices)),
"num_eval": int(len(val_indices)),
"num_test": int(len(test_indices)),
"total_samples": int(n_total),
"dtype": str(data.dtype),
"seed": seed
}
with open(output_dir / "metadata.json", 'w') as f:
json.dump(metadata, f, indent=2)
print(f" ✅ Saved")
print()
print("="*70)
print("✅ Split Complete!")
print("="*70)
print()
print("📁 Output structure:")
print(f" {output_dir}/")
print(f" ├── train.npy ({len(train_indices):,} samples)")
print(f" ├── eval.npy ({len(val_indices):,} samples)")
print(f" ├── test.npy ({len(test_indices):,} samples)")
print(f" └── metadata.json")
print()
print("📊 Metadata:")
for key, value in metadata.items():
print(f" {key}: {value:,}" if isinstance(value, int) else f" {key}: {value}")
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description="Split a tokenized Pile .npy file into train/eval/test.")
    parser.add_argument("--input", default="batch_0_to_1000.npy", help="Path to the tokenized .npy file")
    parser.add_argument("--output", default="tokenized", help="Output directory for the splits")
    parser.add_argument("--train_ratio", type=float, default=0.8, help="Fraction of samples used for training")
    parser.add_argument("--val_ratio", type=float, default=0.1, help="Fraction of samples used for validation")
    parser.add_argument("--seed", type=int, default=42, help="Random seed for shuffling")
args = parser.parse_args()
split_pile_data(
args.input,
args.output,
args.train_ratio,
args.val_ratio,
args.seed
)
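
# Example invocation (a sketch; assumes the default input file exists in the
# working directory):
#   python split_pile_data.py --input batch_0_to_1000.npy --output tokenized \
#       --train_ratio 0.8 --val_ratio 0.1 --seed 42
#
# Downstream code can then load a split directly, e.g.:
#   train = np.load("tokenized/train.npy")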