LisaMegaWatts committed on
Commit b3034b6 · verified · 1 Parent(s): c40ee2a

Upload encode_corpus.py with huggingface_hub
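The commit title says the file was pushed with huggingface_hub. For context, a minimal sketch of how such an upload is typically done with that library is shown below; the repo_id is a placeholder, since the target repository is not visible in this commit view:

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login`
api.upload_file(
    path_or_fileobj="encode_corpus.py",
    path_in_repo="encode_corpus.py",
    repo_id="<user>/<repo>",  # placeholder: actual repo not shown in this view
    commit_message="Upload encode_corpus.py with huggingface_hub",
)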

Files changed (1)
  1. encode_corpus.py +108 -0
encode_corpus.py ADDED
@@ -0,0 +1,108 @@
+"""
+Pre-encode corpus into token ID binary files for Julia training.
+
+Saves tokens as Int32 arrays (.bin) that Julia can mmap directly,
+avoiding the slow pure-Julia BPE encoding at training time.
+
+Usage:
+    python encode_corpus.py                               # Encode train.txt + val.txt
+    python encode_corpus.py --tokenizer output/tokenizer_4k.json
+"""
+
+import argparse
+import logging
+import struct
+import numpy as np
+from pathlib import Path
+from tokenizers import Tokenizer
+
+logger = logging.getLogger("encode_corpus")
+SCRIPT_DIR = Path(__file__).resolve().parent
+
+BATCH_LINES = 100_000  # encode this many lines at once
+
+
+def encode_file(tokenizer: Tokenizer, input_path: Path, output_path: Path, offset: int = 1):
+    """Encode a text file into a binary token ID file, streaming by line batches."""
+    logger.info("Encoding %s → %s", input_path, output_path)
+
+    # First pass: count lines for progress
+    n_lines = sum(1 for _ in open(input_path, encoding="utf-8"))
+    logger.info("  %d lines to encode", n_lines)
+
+    all_ids = []
+    total_tokens = 0
+
+    with open(input_path, encoding="utf-8") as f:
+        batch = []
+        line_count = 0
+        for line in f:
+            batch.append(line)
+            line_count += 1
+
+            if len(batch) >= BATCH_LINES:
+                encoded = tokenizer.encode_batch(batch)
+                for enc in encoded:
+                    all_ids.extend(enc.ids)
+                total_tokens += sum(len(enc.ids) for enc in encoded)
+                if line_count % (BATCH_LINES * 5) == 0:
+                    logger.info("  %d/%d lines (%.1f%%), %dM tokens so far",
+                                line_count, n_lines,
+                                100 * line_count / max(n_lines, 1),
+                                total_tokens / 1e6)
+                batch = []
+
+        # Final batch
+        if batch:
+            encoded = tokenizer.encode_batch(batch)
+            for enc in encoded:
+                all_ids.extend(enc.ids)
+            total_tokens += sum(len(enc.ids) for enc in encoded)
+
+    # Convert to numpy and apply offset
+    arr = np.array(all_ids, dtype=np.int32) + offset
+
+    # Write: magic (4B) + n_tokens (8B) + offset (4B) + int32 data
+    with open(output_path, "wb") as f:
+        f.write(b"JTOK")
+        f.write(struct.pack("<Q", len(arr)))
+        f.write(struct.pack("<i", offset))
+        arr.tofile(f)
+
+    size_mb = output_path.stat().st_size / 1024 / 1024
+    logger.info("  Done: %d tokens (%.1fM), %.1f MB, range [%d, %d]",
+                len(arr), len(arr) / 1e6, size_mb, arr.min(), arr.max())
+    return len(arr)
+
+
+def main():
+    logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+
+    parser = argparse.ArgumentParser(description="Pre-encode corpus for Julia training")
+    parser.add_argument("--tokenizer", type=str, default=None)
+    parser.add_argument("--output-dir", type=str, default=None)
+    args = parser.parse_args()
+
+    output_dir = Path(args.output_dir) if args.output_dir else SCRIPT_DIR / "output"
+    tok_path = Path(args.tokenizer) if args.tokenizer else output_dir / "tokenizer.json"
+
+    if not tok_path.exists():
+        raise FileNotFoundError(f"Tokenizer not found: {tok_path}")
+
+    tokenizer = Tokenizer.from_file(str(tok_path))
+    logger.info("Loaded tokenizer: vocab_size=%d from %s", tokenizer.get_vocab_size(), tok_path)
+
+    total = 0
+    for name in ["train", "val"]:
+        txt_path = output_dir / f"{name}.txt"
+        bin_path = output_dir / f"{name}.bin"
+        if txt_path.exists():
+            total += encode_file(tokenizer, txt_path, bin_path)
+        else:
+            logger.warning("Skipping %s (not found)", txt_path)
+
+    logger.info("All done! Total: %d tokens (%.1fM)", total, total / 1e6)
+
+
+if __name__ == "__main__":
+    main()
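
For reference, the header written by encode_file is 16 bytes (the magic b"JTOK", a little-endian uint64 token count, and a little-endian int32 offset), followed by raw int32 token data. A minimal sanity-check reader, sketched here in Python under those assumptions (read_tokens_bin is a hypothetical helper, not part of the commit), can memory-map the payload the same way the Julia side is intended to:

import struct
import numpy as np

def read_tokens_bin(path):
    """Hypothetical reader for the layout written by encode_file:
    b"JTOK" (4B) + n_tokens (<Q, 8B) + offset (<i, 4B) + int32 data."""
    with open(path, "rb") as f:
        magic = f.read(4)
        if magic != b"JTOK":
            raise ValueError(f"bad magic {magic!r}, expected b'JTOK'")
        (n_tokens,) = struct.unpack("<Q", f.read(8))
        (offset,) = struct.unpack("<i", f.read(4))
        header_size = f.tell()  # 16 bytes
    # Memory-map the payload so a large corpus is never loaded eagerly.
    # "<i4" matches arr.tofile() output on little-endian machines.
    tokens = np.memmap(path, dtype="<i4", mode="r",
                       offset=header_size, shape=(n_tokens,))
    return tokens, offset

tokens, offset = read_tokens_bin("output/train.bin")
print(len(tokens), int(tokens.min()), int(tokens.max()), offset)

The default offset of 1 shifts every token ID into a 1-based range, which lines up with Julia's 1-based indexing; a Julia consumer would presumably skip the 16-byte header and mmap the remainder as a Vector{Int32}.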