import argparse
import logging

from datasets import Dataset, load_dataset
from tokenizers import Tokenizer, decoders, models, pre_tokenizers, processors, trainers
from transformers import AutoTokenizer, GPT2TokenizerFast


def initialize_logger(log_file):
    logging.basicConfig(filename=log_file, level=logging.INFO, format='%(asctime)s: %(message)s')


def log_parameters(vocab_size, batch_size, fertility_score, proportion_continued_words, log_file='parameters.log'):
    # Record the tokenizer settings and evaluation metrics in a log file.
    initialize_logger(log_file)
    logging.info(
        f"Vocabulary Size: {vocab_size}, Batch Size: {batch_size}, "
        f"Fertility Score: {fertility_score}, Proportion of Continued Words: {proportion_continued_words}"
    )
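

# The script accepts --do_evaluate and log_parameters expects a fertility
# score and a proportion of continued words, but no evaluation routine is
# defined anywhere. The sketch below is an assumption, not the original
# author's code: fertility is taken here as the average number of tokens per
# whitespace-separated word, and a word counts as "continued" when the
# tokenizer splits it into two or more tokens.
def evaluate_tokenizer(tokenizer, texts, batch_size, vocab_size):
    total_words, total_tokens, continued_words = 0, 0, 0
    for text in texts:
        for word in text.split():
            n_tokens = len(tokenizer.tokenize(word))
            total_words += 1
            total_tokens += n_tokens
            if n_tokens > 1:
                continued_words += 1
    fertility = total_tokens / max(total_words, 1)
    proportion_continued = continued_words / max(total_words, 1)
    log_parameters(vocab_size, batch_size, fertility, proportion_continued)
    return fertility, proportion_continued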


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--batch_size",
        type=int,
        required=True,
        help="Batch size to use when iterating over the training corpus"
    )
    parser.add_argument(
        "--vocab_size",
        type=int,
        required=True,
        help="Vocabulary size to use for the tokenizer"
    )
    parser.add_argument(
        "--use_config",
        choices=['xlm-roberta', 'vanilla', 'gemma'],
        required=True,
        help="Tokenizer configuration: XLM-RoBERTa, vanilla byte-level BPE, or Gemma"
    )
    parser.add_argument(
        "--do_evaluate",
        action='store_true',
        help="Enable evaluation of the trained tokenizer."
    )
    # parse_known_args returns (namespace, unknown_args); return both so the
    # caller can unpack them.
    args = parser.parse_known_args()
    return args


def train_tokenizer(args):
    # Load the English split of IndicCorp v2 and keep the first 205,090 texts.
    indic_datasets_en = load_dataset(
        'satpalsr/indicCorpv2', 'en', split='train',
        cache_dir='/sml1/atul/CENTRAL_CACHE'
    )['text'][:205090]

    # The original code built a DatasetDict with empty validation/test splits
    # and immediately discarded them; a single train Dataset is equivalent.
    combined_train_set = indic_datasets_en
    custom_dataset = Dataset.from_dict({"text": combined_train_set})

    def batch_iterator():
        # Stream the corpus in batches so the full text list is never handed
        # to the tokenizer trainer at once.
        for idx in range(0, len(custom_dataset), args.batch_size):
            yield custom_dataset[idx: idx + args.batch_size]['text']

    if args.use_config == 'vanilla':
        # Train a byte-level BPE tokenizer from scratch.
        tokenizer = Tokenizer(models.BPE())
        tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
        sample = 'The brown fox jumped over the lazy dog'
        print(f"[INFO] {sample}\n{tokenizer.pre_tokenizer.pre_tokenize_str(sample)}")
        print("[INFO] Training...")
        trainer = trainers.BpeTrainer(vocab_size=args.vocab_size, special_tokens=["<|endoftext|>"])
        tokenizer.train_from_iterator(batch_iterator(), trainer=trainer)
        tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
        tokenizer.decoder = decoders.ByteLevel()
        # Wrap in a fast tokenizer so it can be saved with save_pretrained.
        # (The original assigned this to `tokenizer`, leaving
        # `trained_tokenizer` undefined on this branch.)
        trained_tokenizer = GPT2TokenizerFast(tokenizer_object=tokenizer)
    elif args.use_config == 'xlm-roberta':
        # Retrain the XLM-RoBERTa tokenizer on the new corpus.
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        trained_tokenizer = tokenizer.train_new_from_iterator(batch_iterator(), vocab_size=args.vocab_size)
    elif args.use_config == 'gemma':
        # Retrain a Gemma-style tokenizer on the new corpus.
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/dummy-gemma')
        trained_tokenizer = tokenizer.train_new_from_iterator(batch_iterator(), vocab_size=args.vocab_size)

    # Note: the output directory name is fixed regardless of --use_config.
    trained_tokenizer.save_pretrained('hi-indiccorp-gemma-bgpt-bpe-tokenizer1')
    print("[INFO] Tokenizer saved to disk")


def main():
    args, _ = parse_arguments()
    train_tokenizer(args)


if __name__ == "__main__":
    main()
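
# Example invocation (script filename and values are illustrative):
#   python train_tokenizer.py --batch_size 1000 --vocab_size 32000 --use_config vanilla --do_evaluate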