File size: 2,635 Bytes
521547b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import csv
import os
import pickle

import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from rdkit import Chem
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv
from transformers import AutoTokenizer, AutoModel

from utils import *

def load_csv_data(filename):
    """Read a comma-separated CSV file and return its data rows.

    The header row is skipped, and rows whose cells are all empty
    strings are dropped.

    Args:
        filename: Path to the CSV file to read.

    Returns:
        A list of rows, each row a list of string cell values.
    """
    mylist = []
    # newline="" is required by the csv module so newlines embedded in
    # quoted fields are parsed correctly.
    with open(filename, newline="") as data:
        csv_data = csv.reader(data, delimiter=',')
        # Skip header row; the None default avoids StopIteration when
        # the file is completely empty.
        next(csv_data, None)
        for row in csv_data:
            if any(row):  # Check if any element in the row is non-empty
                mylist.append(row)
    return mylist

# --- Embed SMILES strings with ChemBERTa, checkpointing progress so a
# --- crashed run can be resumed from the last saved batch.

# Load the cleaned bovine permeability dataset (header skipped, blank
# rows dropped by load_csv_data).
new_list = load_csv_data('Raw_data/Bovine_cleaned_data_with_permeability.csv')
print(f'Total number of sample in .csv file: {len(new_list)}')

# Directory for output
embeddings_output_folder = "Embedded_data_ChemBERTa_with_permeability/Bovine_data"
check_dir(embeddings_output_folder)  # NOTE(review): presumably creates the folder if missing — defined in utils, confirm

# Initialize progress tracker file
progress_tracker_file = f"{embeddings_output_folder}/run_progress_tracker"
data_list = []

# Restart from a specified line number (1 = process everything from scratch)
restart_line = 1  # Change this to the line number from which to restart

# When restarting, reload the checkpoint written just before restart_line
# so previously computed embeddings are not recomputed or lost.
embedded_file_path = f"{embeddings_output_folder}/Embedded_file_upto_{restart_line-1}_restart.pkl"
if restart_line > 1 and os.path.exists(embedded_file_path):
    with open(embedded_file_path, 'rb') as file:
        data_list = pickle.load(file)
        print(f"Resuming from line {restart_line}")

# Truncate/create the tracker so each run starts with a fresh log.
with open(progress_tracker_file, 'w') as file:
    file.write(f"Starting my work from line {restart_line}\n")

for index, item in enumerate(new_list, start=1):
    if index < restart_line:
        continue  # Skip lines already handled by a previous run

    # Open/append/close each time so the log survives a mid-run crash.
    with open(progress_tracker_file, 'a') as file:
        file.write(f"Working on line {index}/{len(new_list)}\n")

    if item[1]:  # column 1 holds the SMILES string; skip rows without one
        smiles = [item[1]]
        chemBERTa_embedding = smiles_ChemBERTa_embedding(smiles)  # from utils

        # Keep id, SMILES, the two metadata columns, the embedding, and
        # the permeability label together for downstream use.
        # NOTE(review): item[2] is deliberately dropped — confirm intended.
        data_list.append([item[0], item[1], item[3], item[4], chemBERTa_embedding, item[5]])

    # Periodic checkpoint: every 500 rows, dump everything computed so far.
    if index % 500 == 0:
        with open(f"{embeddings_output_folder}/Embedded_file_upto_{index}_restart.pkl", 'wb') as file:
            pickle.dump(data_list, file)

    print(f"The line {index}/{len(new_list)} is completed")
    with open(progress_tracker_file, 'a') as file:
        file.write(f"The line {index}/{len(new_list)} is completed.\n")

# Final dump containing every embedded row.
with open(f"{embeddings_output_folder}/Embedded_file_complete_ChemBERTa.pkl", 'wb') as file:
    pickle.dump(data_list, file)
    print("Process completed without an error")