Devansh0711 committed
Commit 25fc268 · verified · 1 Parent(s): ce312c4

Upload train_tokenizer.py with huggingface_hub

Files changed (1)
  1. train_tokenizer.py +64 -0
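
The commit message says the file was pushed with the `huggingface_hub` client. A minimal sketch of such an upload call follows; the repo id is a placeholder based on the committer's username, not taken from the commit itself:

    # hypothetical upload sketch; repo_id is an assumption, not from the commit
    from huggingface_hub import upload_file

    upload_file(
        path_or_fileobj="train_tokenizer.py",   # local file to push
        path_in_repo="train_tokenizer.py",      # destination path in the repo
        repo_id="Devansh0711/<repo-name>",      # placeholder: actual repo name unknown
        commit_message="Upload train_tokenizer.py with huggingface_hub",
    )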
train_tokenizer.py ADDED
@@ -0,0 +1,64 @@
+ # train_tokenizer.py
+ # Author: Devansh Sinha
+ from tokenizers import ByteLevelBPETokenizer
+ from pathlib import Path
+ import requests
+
+ texts = []
+
+ # Download several public-domain books from Project Gutenberg for a larger dataset
+ urls = [
+     "https://www.gutenberg.org/files/11/11-0.txt",      # Alice in Wonderland
+     "https://www.gutenberg.org/files/1342/1342-0.txt",  # Pride and Prejudice
+     "https://www.gutenberg.org/files/84/84-0.txt",      # Frankenstein
+     "https://www.gutenberg.org/files/1661/1661-0.txt",  # Sherlock Holmes
+     "https://www.gutenberg.org/files/2701/2701-0.txt",  # Moby Dick
+     "https://www.gutenberg.org/files/98/98-0.txt",      # A Tale of Two Cities
+     "https://www.gutenberg.org/files/5200/5200-0.txt",  # Metamorphosis
+     "https://www.gutenberg.org/files/2600/2600-0.txt",  # War and Peace
+     "https://www.gutenberg.org/files/74/74-0.txt",      # The Adventures of Tom Sawyer
+     "https://www.gutenberg.org/files/1400/1400-0.txt",  # Great Expectations
+ ]
+
+ for url in urls:
+     print(f"Downloading {url} ...")
+     response = requests.get(url, timeout=30)
+     if response.status_code == 200:
+         response.encoding = "utf-8"  # Gutenberg plain-text files are UTF-8
+         book_texts = [line.strip() for line in response.text.split("\n") if line.strip()]
+         texts.extend(book_texts)
+         print(f"Added {len(book_texts)} lines.")
+     else:
+         print(f"Failed to download {url}")
+
+ print(f"Total lines collected: {len(texts)}")
+
+ # Rough token count via whitespace splitting (words, not BPE tokens)
+ total_tokens = sum(len(line.split()) for line in texts)
+ print(f"Total number of training tokens (approximate, whitespace split): {total_tokens}")
+
+ # Save all texts to a temporary file for training
+ corpus_path = "corpus.txt"
+ with open(corpus_path, "w", encoding="utf-8") as f:
+     for line in texts:
+         f.write(line + "\n")
+
+ # Train a byte-level Byte Pair Encoding (BPE) tokenizer
+ tokenizer = ByteLevelBPETokenizer()
+ tokenizer.train(
+     files=corpus_path,
+     vocab_size=10000,
+     min_frequency=2,
+     special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>"],
+ )
+
+ # Save the tokenizer (writes vocab.json and merges.txt)
+ save_dir = "my-10k-bpe-tokenizer"
+ Path(save_dir).mkdir(exist_ok=True)
+ tokenizer.save_model(save_dir)
+
+ # Also save a single-file tokenizer.json for Hugging Face compatibility
+ tokenizer_json_path = str(Path(save_dir) / "tokenizer.json")
+ tokenizer.save(tokenizer_json_path)
+ print(f"Saved HuggingFace-compatible tokenizer.json to {tokenizer_json_path}")
+
+ print(f"BPE tokenizer trained and saved to {save_dir}/")
+ print(f"Number of tokens in vocab: {tokenizer.get_vocab_size()}")
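
To sanity-check the result, the saved files can be loaded back and used to encode text. A minimal sketch; the sample sentence is illustrative and this snippet is not part of the commit:

    # load the trained tokenizer back and encode a sample sentence
    from tokenizers import ByteLevelBPETokenizer, Tokenizer

    # option 1: rebuild from the vocab/merges pair written by save_model()
    tok = ByteLevelBPETokenizer(
        "my-10k-bpe-tokenizer/vocab.json",
        "my-10k-bpe-tokenizer/merges.txt",
    )

    # option 2: load the single-file tokenizer.json written by save()
    tok_json = Tokenizer.from_file("my-10k-bpe-tokenizer/tokenizer.json")

    enc = tok.encode("It was the best of times, it was the worst of times.")
    print(enc.tokens)  # byte-level BPE pieces drawn from the 10k vocabulary
    print(enc.ids)     # corresponding integer ids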