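"""Hugging Face `datasets` loading script for the thewall/tokenizer dataset.

Each config ("esm", "progen2", "raptgen", "aptamer") downloads a tar.gz archive
and exposes its tokenizer.json and special_tokens_map.json contents as raw
strings, so a PreTrainedTokenizerFast can be rebuilt from a single dataset row.
"""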
import os

import datasets

logger = datasets.logging.get_logger(__name__)

URL = "https://huggingface.co/datasets/thewall/tokenizer/resolve/main"

class TokenizerConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super(TokenizerConfig, self).__init__(**kwargs)


class Tokenizer(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        TokenizerConfig(name=key) for key in ["esm", "progen2", "raptgen", "aptamer"]
    ]
    DEFAULT_CONFIG_NAME = "esm"
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "name": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "tokenizer": datasets.Value("string"),
                    "special_tokens_map": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        # Each config ships as a single tar.gz archive containing the tokenizer files.
        file = dl_manager.download_and_extract(f"{URL}/{self.config.name}.tar.gz")
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file}),
        ]
    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        name = self.config.name
        tokenizer_file = os.path.join(filepath, name, "tokenizer.json")
        tokens_map_file = os.path.join(filepath, name, "special_tokens_map.json")
        # The JSON files are passed through verbatim as strings so that consumers
        # can rebuild the tokenizer on their side.
        with open(tokenizer_file) as f:
            tokenizer = f.read()
        with open(tokens_map_file) as f:
            special_tokens = f.read()
        yield 0, {
            "id": 0,
            "path": os.path.join(filepath, name),
            "name": name,
            "tokenizer": tokenizer,
            "special_tokens_map": special_tokens,
        }

if __name__ == "__main__":
    import json

    from datasets import load_dataset
    from tokenizers import Tokenizer as HFTokenizer  # aliased to avoid shadowing the builder class above
    from transformers import PreTrainedTokenizerFast

    dataset = load_dataset("tokenizer.py", split="all")
    tokenizer = HFTokenizer.from_str(dataset[0]["tokenizer"])
    token_map = json.loads(dataset[0]["special_tokens_map"])
    fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer, **token_map)
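    # Hypothetical sanity check: the input string and calls below are an added
    # illustration, not part of the original script; they assume the default
    # "esm" config, whose vocabulary is expected to cover amino-acid sequences.
    encoded = fast_tokenizer("MKTAYIAKQR")
    print(encoded.input_ids)
    print(fast_tokenizer.convert_ids_to_tokens(encoded.input_ids))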