import os

import datasets

logger = datasets.logging.get_logger(__name__)

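# Loading script for the thewall/tokenizer dataset repo: each config ships a
# serialized tokenizer as raw JSON (tokenizer.json + special_tokens_map.json).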
| URL = "https://huggingface.co/datasets/thewall/tokenizer/resolve/main" | |
class TokenizerConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

class Tokenizer(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        TokenizerConfig(name=key) for key in ["esm", "progen2", "raptgen", "aptamer"]
    ]
    DEFAULT_CONFIG_NAME = "esm"

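    # Each record stores the tokenizer files as raw JSON strings, so a consumer
    # can rebuild the tokenizer object without re-reading files from disk.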
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "name": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "tokenizer": datasets.Value("string"),
                    "special_tokens_map": datasets.Value("string"),
                }
            ),
        )

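    # Each config maps to a single <name>.tar.gz archive; everything lands in
    # one TRAIN split.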
    def _split_generators(self, dl_manager):
        file = dl_manager.download_and_extract(f"{URL}/{self.config.name}.tar.gz")
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file}),
        ]

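    # The extracted archive is expected to contain <name>/tokenizer.json and
    # <name>/special_tokens_map.json, matching the paths built below.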
    def _generate_examples(self, filepath):
        """Yield a single example carrying the raw (text) contents of the tokenizer files."""
        logger.info("generating examples from = %s", filepath)
        name = self.config.name
        tokenizer_file = os.path.join(filepath, name, "tokenizer.json")
        tokens_map_file = os.path.join(filepath, name, "special_tokens_map.json")
        with open(tokenizer_file) as f:
            tokenizer = f.read()
        with open(tokens_map_file) as f:
            special_tokens = f.read()
        yield 0, {
            "id": 0,
            "name": name,
            "path": os.path.join(filepath, name),
            "tokenizer": tokenizer,
            "special_tokens_map": special_tokens,
        }

if __name__ == "__main__":
    import json

    from datasets import load_dataset
    from tokenizers import Tokenizer as TokenizerObject  # avoid shadowing the builder class above
    from transformers import PreTrainedTokenizerFast

    dataset = load_dataset("tokenizer.py", split="all")
    tokenizer = TokenizerObject.from_str(dataset[0]["tokenizer"])
    token_map = json.loads(dataset[0]["special_tokens_map"])
    fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer, **token_map)
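    # Hypothetical usage: encode a short (made-up) amino-acid sequence with the
    # rebuilt tokenizer.
    print(fast_tokenizer("MKTAYIAK").input_ids)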