import argparse
import logging

from datasets import Dataset, load_dataset
from tokenizers import Tokenizer, decoders, models, pre_tokenizers, processors, trainers
from transformers import AutoTokenizer, GPT2TokenizerFast
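
# Trains a tokenizer on a mixed corpus (CodeXGLUE code, Sangraha Hindi/Bengali,
# English Wikipedia) using one of three recipes: vanilla byte-level BPE, the
# XLM-RoBERTa tokenizer config, or the Llama 3 tokenizer config.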
def initialize_logger(log_file):
    logging.basicConfig(filename=log_file, level=logging.INFO, format='%(asctime)s: %(message)s')


# Records tokenizer statistics: fertility is the average number of subword
# tokens per word; the proportion of continued words is the share of words
# split into more than one subword.
def log_parameters(vocab_size, batch_size, fertility_score, proportion_continued_words, log_file='parameters.log'):
    initialize_logger(log_file)
    logging.info(f"Vocabulary Size: {vocab_size}, Batch Size: {batch_size}, Fertility Score: {fertility_score}, Proportion of Continued Words: {proportion_continued_words}")
def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--batch_size",
        type=int,
        required=True,
        help="Batch size to use for training"
    )
    parser.add_argument(
        "--vocab_size",
        type=int,
        required=True,
        help="Vocabulary size to use for the tokenizer"
    )
    parser.add_argument(
        "--use_config",
        choices=['xlm-roberta', 'vanilla', 'llama'],
        required=True,
        help="Tokenizer recipe: XLM-RoBERTa config, vanilla byte-level BPE, or Llama 3 config"
    )
    parser.add_argument(
        "--do_evaluate",
        action='store_true',
        help="Enable evaluation."
    )
    # parse_known_args() returns an (args, unknown) tuple; main() unpacks it.
    return parser.parse_known_args()
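
# Example invocation (script name and values are illustrative):
#   python train_tokenizer.py --batch_size 1000 --vocab_size 50000 --use_config llama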

def train_tokenizer(args):
    cache_dir = '/sml2/atul/CENTRAL_CACHE'

    # CodeXGLUE code-to-text: the training split for each programming language.
    code_datasets = []
    for lang in ['go', 'java', 'javascript', 'php', 'python', 'ruby']:
        code_datasets.append(
            load_dataset('code_x_glue_ct_code_to_text', lang, split='train', cache_dir=cache_dir)['code']
        )

    # Sangraha verified Hindi and Bengali text plus English Wikipedia, capped per source.
    indic_datasets_hi = load_dataset('ai4bharat/sangraha', data_dir="verified/hin", cache_dir=cache_dir)['train']['text'][:6000000]
    indic_datasets_bn = load_dataset('ai4bharat/sangraha', data_dir="verified/ben", cache_dir=cache_dir)['train']['text'][:6000000]
    wikipedia_en = load_dataset("wikipedia", "20220301.en", cache_dir=cache_dir)['train']['text'][:1000000]

    combined_train_set = sum(code_datasets, []) + indic_datasets_hi + indic_datasets_bn + wikipedia_en
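    # The combined corpus is materialized fully in memory as plain Python lists
    # of strings; no streaming is used.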
    # Wrap the combined corpus in a single Dataset; only a train split is needed
    # for tokenizer training, and batch_iterator() below slices it directly.
    custom_dataset = Dataset.from_dict({"text": combined_train_set})
    # Yields successive batches of raw text for tokenizer training.
    def batch_iterator():
        for idx in range(0, len(custom_dataset), args.batch_size):
            yield custom_dataset[idx: idx + args.batch_size]['text']
    # Build a rename map for Llama 3's reserved special tokens: tokens 5-24 are
    # remapped to runs of 1-20 newline characters (used by the 'llama' branch).
    new_line = "\n"
    replacing_dict = {}
    for i in range(5, 25):
        replaceable_token = "<|reserved_special_token_" + str(i) + "|>"
        replacing_dict[replaceable_token] = new_line
        new_line += "\n"
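
    # Three recipes: train a byte-level BPE from scratch ('vanilla'), or retrain
    # an existing pretrained tokenizer on the new corpus via
    # train_new_from_iterator ('xlm-roberta' and 'llama').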
    if args.use_config == 'vanilla':
        # Byte-level BPE trained from scratch, GPT-2 style.
        tokenizer = Tokenizer(models.BPE())
        tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
        # Quick sanity check of the pre-tokenizer on a sample sentence.
        print(f"[INFO] The brown fox jumped over the lazy dog\n{tokenizer.pre_tokenizer.pre_tokenize_str('The brown fox jumped over the lazy dog')}")
        print("[INFO] Training...")
        trainer = trainers.BpeTrainer(vocab_size=args.vocab_size, special_tokens=["<|endoftext|>"])
        tokenizer.train_from_iterator(batch_iterator(), trainer=trainer)
        tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
        tokenizer.decoder = decoders.ByteLevel()
        # Wrap in a fast tokenizer so save_pretrained() below works for this branch.
        trained_tokenizer = GPT2TokenizerFast(tokenizer_object=tokenizer)
    elif args.use_config == 'xlm-roberta':
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        trained_tokenizer = tokenizer.train_new_from_iterator(batch_iterator(), vocab_size=args.vocab_size)
    elif args.use_config == 'llama':
        tokenizer = AutoTokenizer.from_pretrained('meta-llama/Meta-Llama-3-8B')
        # Retrain with a new vocabulary; special_tokens_map renames the reserved
        # special tokens to the newline strings built above.
        trained_tokenizer = tokenizer.train_new_from_iterator(
            batch_iterator(),
            vocab_size=args.vocab_size,
            new_special_tokens=["<unk>", "<pad>", "<mask>"],
            special_tokens_map=replacing_dict,
        )
    trained_tokenizer.save_pretrained('hi-sanghara-xlmr-bgpt-bpe-tokenizer1')
    print("[INFO] Tokenizer saved to disk")
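
# --do_evaluate is parsed above but no evaluation is implemented in this script.
# The sketch below shows one common way to compute the two metrics that
# log_parameters() records; the whitespace-based word splitting and the function
# name are assumptions, not part of the original code.
def evaluate_tokenizer(tokenizer, texts):
    total_words = 0
    total_tokens = 0
    continued_words = 0
    for text in texts:
        for word in text.split():
            n_subwords = len(tokenizer.tokenize(word))
            total_words += 1
            total_tokens += n_subwords
            if n_subwords > 1:
                continued_words += 1
    fertility = total_tokens / max(total_words, 1)
    proportion_continued = continued_words / max(total_words, 1)
    # The returned values can be passed to log_parameters() for logging.
    return fertility, proportion_continued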


def main():
    args, _ = parse_arguments()
    train_tokenizer(args)
if __name__ == "__main__":
    main()