import re

import pandas as pd
from tqdm import tqdm

from t5_tokenizer_model import SentencePieceAtomwiseTokenizer
from pretokenizer import atomwise_tokenizer



vocab_size = 32_000
input_sentence_size = None

# Load the training corpus: a CSV with one SMILES string per row.
dataset = pd.read_csv('/home/zoez/Chem-T5/train-file.csv')
dataset.columns = ['SMILES']
# Empty column that will accumulate the pre-tokenized, grouped sequences.
dataset['SMILESs'] = ''

tokenizer = SentencePieceAtomwiseTokenizer(unk_token="<unk>", eos_token="</s>", pad_token="<pad>")

# Pre-tokenize every SMILES string and join them in groups of 50, separated
# by '&', so each surviving row holds one longer training sequence.
group_size = 50
for i, line in tqdm(enumerate(dataset['SMILES']), total=len(dataset)):
    line = re.sub(r'\d+\t', '', line)    # drop a leading "<index>\t" if the CSV carried one
    new_line = atomwise_tokenizer(line)  # assumed to return a string of atom-level tokens
    # .at writes through to the frame; the original chained
    # iloc[...][...] += assignment silently modified a temporary copy.
    dataset.at[i // group_size, 'SMILESs'] += "&" + new_line

# Only the first ceil(len / group_size) rows were filled; drop the empty tail.
dataset = dataset.iloc[:(len(dataset) + group_size - 1) // group_size]
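
# An equivalent vectorized formulation (a sketch under the same assumption
# that atomwise_tokenizer returns a string); left commented out so the loop
# above remains the single source of truth:
#   cleaned = dataset['SMILES'].str.replace(r'\d+\t', '', regex=True)
#   grouped = cleaned.map(atomwise_tokenizer).groupby(cleaned.index // 50)
#   dataset = grouped.agg(lambda toks: '&' + '&'.join(toks)).to_frame('SMILESs')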

# Build a batched iterator over the grouped sequences.
def batch_iterator(input_sentence_size=None):
    if input_sentence_size is None:
        input_sentence_size = len(dataset)
    batch_length = 100
    for i in range(0, input_sentence_size, batch_length):
        yield dataset[i: i + batch_length]['SMILESs']


# Train tokenizer
tokenizer.train_from_iterator(
    iterator=batch_iterator(input_sentence_size=input_sentence_size),
    vocab_size=vocab_size,
    show_progress=True,
)


# Save files to disk
tokenizer.save("/home/zoez/chemT5/tokenizer.json")
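
# Sketch (not in the original script): reload the saved tokenizer and wrap it
# in a transformers fast tokenizer for downstream T5 training. Assumes the
# standard `tokenizers`/`transformers` APIs and the path used above.
from tokenizers import Tokenizer
from transformers import PreTrainedTokenizerFast

reloaded = Tokenizer.from_file("/home/zoez/chemT5/tokenizer.json")
fast_tokenizer = PreTrainedTokenizerFast(
    tokenizer_object=reloaded,
    unk_token="<unk>",
    eos_token="</s>",
    pad_token="<pad>",
)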


# Smoke test: pre-tokenize a sample molecule and inspect the resulting tokens.
print(tokenizer.encode(atomwise_tokenizer("O=[N+]([O-])c1ccc(Cl)cc1O=[N+]([O-])c1ccc(Cl)cc1")).tokens)

# Optionally derive a T5 config sized to the new vocabulary:
#from transformers import T5Config
#config = T5Config.from_pretrained("google/t5-v1_1-base", vocab_size=tokenizer.get_vocab_size())
#config.save_pretrained("/home/zoez/chem-T5")