# chemT5 / tokenizer-trainer_atom.py
# Author: ZoeMC — commit 5b8d6fc ("before_train")
import numpy as np
import pandas as pd
import re
from t5_tokenizer_model import SentencePieceAtomwiseTokenizer
from pretokenizer import atomwise_tokenizer
from tqdm import tqdm
# Target vocabulary size for the trained tokenizer.
vocab_size = 32_000
# None -> cover the whole dataset when batching (see batch_iterator below).
input_sentence_size = None

# Load the training corpus: one SMILES string per row.
# NOTE(review): machine-specific path — confirm before running elsewhere.
dataset = pd.read_csv('/home/zoez/Chem-T5/train-file.csv')
tokenizer = SentencePieceAtomwiseTokenizer(unk_token="<unk>", eos_token="</s>", pad_token="<pad>")
dataset.columns = ['SMILES']

# Add an empty 'SMILESs' column: every 50 raw SMILES rows are concatenated
# into one training "sentence", with '&' as the joining character.
dataset = pd.DataFrame(columns=['SMILES', 'SMILESs'], data=dataset)
dataset.fillna('', inplace=True)

# Strip a leading "<digits>\t" prefix (row-index artifact) before tokenizing.
# Compiled once outside the loop; raw string avoids the invalid-escape
# warning that the original '\d+\t' literal triggers.
_prefix_re = re.compile(r'\d+\t')
for i, line in tqdm(enumerate(dataset['SMILES'])):
    line = _prefix_re.sub('', line)
    new_line = atomwise_tokenizer(line)
    # Use .at for a guaranteed in-place scalar write. The original
    # `dataset.iloc[int(i/50)]['SMILESs'] += ...` is chained indexing: the
    # += can land on a temporary copy and silently leave the frame
    # unchanged (pandas SettingWithCopy). Index labels equal positions
    # here (default RangeIndex), so i // 50 addresses the same row.
    dataset.at[i // 50, 'SMILESs'] += "&" + new_line
# Build an iterator over this dataset
def batch_iterator(input_sentence_size=None):
if input_sentence_size is None:
input_sentence_size = len(dataset)
batch_length = 100
for i in range(0, input_sentence_size, batch_length):
#print(dataset[i: i + batch_length]['SMILES'])
yield dataset[i: i + batch_length]['SMILESs']
# Train the SentencePiece atom-wise tokenizer from the batched SMILES
# "sentences" produced above.
tokenizer.train_from_iterator(
    iterator=batch_iterator(input_sentence_size=input_sentence_size),
    vocab_size=vocab_size,
    show_progress=True,
)
# Save files to disk
# NOTE(review): save path uses 'chemT5' while the training CSV lives under
# 'Chem-T5' — confirm which directory is intended.
tokenizer.save("/home/zoez/chemT5/tokenizer.json")
# Smoke test: pre-tokenize and encode a sample, then print the token strings.
# NOTE(review): the literal looks like the same nitro-chlorophenol SMILES
# pasted twice back-to-back — verify this duplication is intentional.
print(tokenizer.encode(atomwise_tokenizer("O=[N+]([O-])c1ccc(Cl)cc1O=[N+]([O-])c1ccc(Cl)cc1")).tokens)
#from transformers import T5Config
#config = T5Config.from_pretrained("google/t5-v1_1-base", vocab_size=tokenizer.get_vocab_size())
#config.save_pretrained("/home/zoez/chem-T5")