#!/usr/bin/env python3
"""
划分 Pile 数据集为 train/val/test
与 BabyLM 保持相同的命名格式
"""
import json
from pathlib import Path

import numpy as np

def split_pile_data(
    input_file="batch_0_to_1000.npy",
    output_dir="tokenized",
    train_ratio=0.8,
    val_ratio=0.1,
    seed=42,
):
"""划分 Pile 数据"""
print("="*70)
print("📊 Splitting Pile Dataset")
print("="*70)
print(f"Input: {input_file}")
print(f"Output: {output_dir}/")
print(f"Split: train={train_ratio:.0%}, val={val_ratio:.0%}, test={1-train_ratio-val_ratio:.0%}")
print(f"Seed: {seed}")
print("="*70)
print()
# 加载数据
print("📥 Loading data...")
data = np.load(input_file, allow_pickle=False)
print(f" Shape: {data.shape}")
print(f" Dtype: {data.dtype}")
print(f" Total samples: {len(data):,}")
print(f" Sequence length: {data.shape[1]}")
print()
    # Seed NumPy's global RNG so the shuffle is reproducible.
    np.random.seed(seed)

    # Shuffle indices rather than the data itself, to avoid copying the array here.
    print("🔀 Shuffling indices...")
    indices = np.arange(len(data))
    np.random.shuffle(indices)

    # Compute the split points; the test split receives whatever remains.
    n_total = len(data)
    n_train = int(n_total * train_ratio)
    n_val = int(n_total * val_ratio)
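    # e.g. with 1,000 samples and the default ratios: n_train = 800,
    # n_val = 100, and the remaining 100 samples become the test split.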
    # Slice the shuffled indices into the three splits.
    train_indices = indices[:n_train]
    val_indices = indices[n_train:n_train + n_val]
    test_indices = indices[n_train + n_val:]
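    # Sanity check (a suggested addition): the three index arrays must
    # together cover every sample exactly once.
    assert len(train_indices) + len(val_indices) + len(test_indices) == n_total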
print("✂️ Splitting...")
print(f" Train: {len(train_indices):,} samples ({len(train_indices)/n_total*100:.1f}%)")
print(f" Val: {len(val_indices):,} samples ({len(val_indices)/n_total*100:.1f}%)")
print(f" Test: {len(test_indices):,} samples ({len(test_indices)/n_total*100:.1f}%)")
print()
    # Create the output directory.
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    print("💾 Saving splits...")
    # Note: the validation split is saved as eval.npy to match the BabyLM naming.
    splits = [
        ("train.npy", train_indices),
        ("eval.npy", val_indices),
        ("test.npy", test_indices),
    ]
    for filename, split_indices in splits:
        print(f"  → {filename}...")
        out_path = output_dir / filename
        np.save(out_path, data[split_indices])
        size_mb = out_path.stat().st_size / (1024 ** 2)
        print(f"  ✅ {size_mb:.1f} MB")
    # Write metadata.json (same format as BabyLM).
    print("  → metadata.json...")
    metadata = {
        "vocab_size": int(data.max()) + 1,  # inferred as max token ID + 1
        "sequence_length": int(data.shape[1]),
        "num_train": len(train_indices),
        "num_eval": len(val_indices),
        "num_test": len(test_indices),
        "total_samples": int(n_total),
        "dtype": str(data.dtype),
        "seed": seed,
    }
    with open(output_dir / "metadata.json", "w") as f:
        json.dump(metadata, f, indent=2)
    print("  ✅ Saved")
    print()
    print("=" * 70)
    print("✅ Split Complete!")
    print("=" * 70)
    print()
    print("📁 Output structure:")
    print(f"  {output_dir}/")
    print(f"  ├── train.npy ({len(train_indices):,} samples)")
    print(f"  ├── eval.npy ({len(val_indices):,} samples)")
    print(f"  ├── test.npy ({len(test_indices):,} samples)")
    print("  └── metadata.json")
    print()
    print("📊 Metadata:")
    for key, value in metadata.items():
        # Integers get thousands separators; everything else prints as-is.
        formatted = f"{value:,}" if isinstance(value, int) else str(value)
        print(f"  {key}: {formatted}")
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Split a tokenized Pile .npy file into train/eval/test."
    )
    parser.add_argument("--input", default="batch_0_to_1000.npy",
                        help="Tokenized input .npy file")
    parser.add_argument("--output", default="tokenized",
                        help="Output directory for the splits")
    parser.add_argument("--train_ratio", type=float, default=0.8,
                        help="Fraction of samples used for training")
    parser.add_argument("--val_ratio", type=float, default=0.1,
                        help="Fraction of samples used for validation (eval)")
    parser.add_argument("--seed", type=int, default=42,
                        help="Random seed for shuffling")
    args = parser.parse_args()

    split_pile_data(
        args.input,
        args.output,
        args.train_ratio,
        args.val_ratio,
        args.seed,
    )
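
# A minimal sketch of how downstream code might load the resulting splits
# (illustrative only; the directory name assumes the default --output):
#
#   import json
#   from pathlib import Path
#   import numpy as np
#
#   data_dir = Path("tokenized")
#   train = np.load(data_dir / "train.npy")
#   meta = json.loads((data_dir / "metadata.json").read_text())
#   assert train.shape == (meta["num_train"], meta["sequence_length"])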