thewall committed
Commit a5232dd · 1 Parent(s): 691d785

Upload tokenizer.py

Files changed (1)
  1. tokenizer.py +69 -0
tokenizer.py ADDED
@@ -0,0 +1,69 @@
import os
import datasets

logger = datasets.logging.get_logger(__name__)

URL = "https://huggingface.co/datasets/thewall/tokenizer/resolve/main"


class TokenizerConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super(TokenizerConfig, self).__init__(**kwargs)


class Tokenizer(datasets.GeneratorBasedBuilder):
    # One config per packaged tokenizer archive ("esm" or "progen2").
    BUILDER_CONFIGS = [TokenizerConfig(name=key) for key in ["esm", "progen2"]]

    DEFAULT_CONFIG_NAME = "esm"

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "name": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "tokenizer": datasets.Value("string"),
                    "special_tokens_map": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        # Download and unpack <config_name>.tar.gz from the dataset repo.
        file = dl_manager.download_and_extract(f"{URL}/{self.config.name}.tar.gz")
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        name = self.config.name
        tokenizer_file = os.path.join(filepath, name, "tokenizer.json")
        tokens_map_file = os.path.join(filepath, name, "special_tokens_map.json")
        with open(tokenizer_file) as f:
            tokenizer = f.read()
        with open(tokens_map_file) as f:
            special_tokens = f.read()
        # A single example per config: the serialized tokenizer and its
        # special-tokens map, stored as raw JSON strings.
        yield 0, {
            "id": 0,
            "path": os.path.join(filepath, name),
            "name": name,
            "tokenizer": tokenizer,
            "special_tokens_map": special_tokens,
        }


if __name__ == "__main__":
    from datasets import load_dataset
    from tokenizers import Tokenizer  # shadows the builder class above; harmless here
    from transformers import PreTrainedTokenizerFast
    import json

    dataset = load_dataset("tokenizer.py", split="all")
    tokenizer = Tokenizer.from_str(dataset[0]["tokenizer"])
    token_map = json.loads(dataset[0]["special_tokens_map"])
    fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer, **token_map)
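
For reference, a minimal sketch of consuming this script from the Hub rather than from the local file, mirroring the __main__ round trip above. It assumes the script is published in the thewall/tokenizer dataset repo (the repo that URL resolves into), that recent datasets releases require trust_remote_code=True to run a Hub-hosted loading script, and that the input sequence is an arbitrary illustration:

import json
from datasets import load_dataset
from tokenizers import Tokenizer
from transformers import PreTrainedTokenizerFast

# "thewall/tokenizer" is inferred from URL above (an assumption); "progen2"
# is the second builder config. The builder yields one row per config.
dataset = load_dataset("thewall/tokenizer", "progen2", split="train", trust_remote_code=True)
record = dataset[0]

# Rebuild a fast tokenizer from the serialized JSON strings in the row.
tokenizer = Tokenizer.from_str(record["tokenizer"])
special_tokens = json.loads(record["special_tokens_map"])
fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer, **special_tokens)
print(fast_tokenizer.tokenize("MKTAYIAKQR"))  # arbitrary example sequence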