import json
import os

import torch

from dataset import ChatDataset, train
from model import MiniGPT
from tokenizers import Tokenizer, models, normalizers
from tokenizers.normalizers import Lowercase, NFD, StripAccents
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer
# Surface the exact operation that produces NaN/Inf gradients; useful while
# debugging, but it slows training and should be disabled for real runs.
torch.autograd.set_detect_anomaly(True)

# Build the training corpus: each JSONL record contributes one text sample,
# formed by joining its "input" and "output" fields.
with open("./data/overfit_data.jsonl", "r", encoding="utf-8") as f:
    texts = [
        record["input"] + " " + record["output"]
        for record in (json.loads(line) for line in f if line.strip())
    ]


def main():
    # BPE tokenizer: lowercase and strip accents, then split on whitespace.
    tokenizer = Tokenizer(models.BPE(unk_token="<UNK>"))
    tokenizer.normalizer = normalizers.Sequence([Lowercase(), NFD(), StripAccents()])
    tokenizer.pre_tokenizer = Whitespace()

    trainer = BpeTrainer(
        vocab_size=28517,
        special_tokens=["<PAD>", "<UNK>", "<END>", "^User:", "MiniGPT:"],
    )
    tokenizer.train_from_iterator(texts, trainer)

    # Persist the tokenizer, then reload it so the rest of the pipeline uses
    # the exact artifact written to disk.
    os.makedirs("./trained-mini-gpt", exist_ok=True)  # save() needs the directory to exist
    tokenizer.save("./trained-mini-gpt/tokenizer.json")
    hf_tokenizer = Tokenizer.from_file("./trained-mini-gpt/tokenizer.json")

    dataset = ChatDataset(
        data="./data/overfit_data.jsonl",
        tokenizer=hf_tokenizer,
    )

    model = MiniGPT(vocab_size=hf_tokenizer.get_vocab_size())
    model.reset_params()  # explicitly (re)initialize parameters before training

    train(
        model,
        dataset,
        hf_tokenizer,
        epochs=200,
        filepathh="./data/merged_data.jsonl",
        learning_rate=1e-4,
    )


if __name__ == "__main__":
    main()