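"""Score sampled protein sequences under an aligned bidirectional ESM-style model.

For a given target and alignment round, reload the masked/unmasked sample
tensors produced during sampling and compute, for every sample, the sum of
per-residue log-probabilities over the masked positions.
"""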
import argparse
import os
import re

import pandas as pd
import torch
from Bio.Seq import Seq
from esm.tokenization.sequence_tokenizer import EsmSequenceTokenizer
from omegaconf import OmegaConf

from pera.nn import BidirectionalModel

# Fall back to CPU when no GPU is available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

sequence_tokenizer = EsmSequenceTokenizer()

# Command-line interface.
parser = argparse.ArgumentParser(description="Compute the log-likelihood of sampled sequences")
parser.add_argument('--target', type=str, required=True, help='Target dataset name')
parser.add_argument('--num_samples', type=int, default=384, help='Number of samples to process (default: 384)')
parser.add_argument('--alignment_round', type=int, default=1, help='Alignment round as an integer')
parser.add_argument('--version_number', type=str, default='1', help='Version number as a string')
parser.add_argument('--replicate', type=int, default=1, help='Replicate number as an integer')
args = parser.parse_args()
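# Example invocation (the script name and target here are illustrative):
#   python score_samples.py --target TrpB --num_samples 384 --alignment_round 1 --replicate 1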

target = args.target
alignment_round = args.alignment_round
version_number = args.version_number
num_samples = args.num_samples
replicate = args.replicate

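# Datasets to score: the base-model samples plus the aligned samples from every completed round.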
datasets = [f"{target}/base_model_{num_samples}"]
for i in range(alignment_round):
    datasets.append(f"{target}/aligned_{i}_{num_samples}_{replicate}")

data_root_path = "/scratch/groups/rotskoff/sebastian/era/protein_era/data"


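# Config and best checkpoint saved by this round's training run.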
cfg_filename = f"{target}/lightning_logs_round_{alignment_round}/{version_number}/config.yaml"
network_filename = f"{target}/lightning_logs_round_{alignment_round}/{version_number}/checkpoints/best_model.ckpt"

cfg = OmegaConf.load(cfg_filename)
# Score at temperature 1 regardless of the training config, and write the
# choice back into the config that the model is constructed from.
sampling_temperature = 1
OmegaConf.update(cfg, "train.lightning_model_args.sampling_temperature", sampling_temperature)
esm_model = BidirectionalModel(cfg["nn"]["model"],
                               cfg["nn"]["model_args"],
                               **cfg["train"]["lightning_model_args"]).to(device)
esm_model.load_model_from_ckpt(network_filename)
esm_model.eval()
mask_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["mask"]
bos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["bos"]
eos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["eos"]
pad_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["pad"]

for data in datasets:
    save_folder_name = data

    # Reduce e.g. "TrpB/aligned_0_384_1" to the bare dataset name.
    data = data.split("/")[0].split("_")[0]

    os.makedirs(save_folder_name, exist_ok=True)
    # Load the fitness table and the parent sequence for this protein.
    # Each FASTA is assumed to hold a single record with the sequence on line two.
    if data.startswith("DHFR"):
        print("Loading DHFR data...")
        df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
        with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
            nucleotide_seq = Seq(file.readlines()[1].strip())
        # The DHFR reference is stored as DNA; translate it to amino acids.
        parent_sequence_decoded = str(nucleotide_seq.translate())
    elif data.startswith("TrpB"):
        # All TrpB variants share one data directory and parent FASTA.
        df = pd.read_csv(f"{data_root_path}/TrpB/scale2max/{data}.csv")
        with open(f"{data_root_path}/TrpB/TrpB.fasta", "r") as file:
            parent_sequence_decoded = file.readlines()[1].strip()
    else:
        df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
        with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
            parent_sequence_decoded = file.readlines()[1].strip()
            
    if data != "GB1":        
        muts = df["muts"].iloc[0]
    else:
        muts = df["muts"].iloc[100000]
    
    numbers = re.findall(r'\d+', muts)
    mask_indices = list(map(int, numbers))
    num_masks_per_sequence = num_samples // 4
    num_to_generate_per_mask = 4
    

    # Tokenize the parent sequence (with BOS/EOS) to fix the sequence length.
    parent_sequence = torch.tensor(sequence_tokenizer.encode(parent_sequence_decoded,
                                                             add_special_tokens=True),
                                   device=device).unsqueeze(0).long()
    sequence_length = parent_sequence.shape[1]
    print(sequence_length, parent_sequence.shape, parent_sequence_decoded)

    print(save_folder_name)

    # Reload the masked/unmasked sample tensors written by the sampling step.
    trpb = torch.load(f"./{save_folder_name}/trpb_{replicate}.pt", map_location=device)
    all_unmasked_sequences_decoded = trpb["all_unmasked_sequences_decoded"]
    all_unmasked_sequences = trpb["all_unmasked_sequences"]
    all_masked_sequences = trpb["all_masked_sequences"]
    all_unmasked_sequences = all_unmasked_sequences.reshape(-1, all_unmasked_sequences.shape[-1])
    
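    # Summed log-probability for each sampled sequence, gathered batch by batch.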
    all_logps = []

    print(all_masked_sequences.shape)

    # Score the samples in mini-batches of num_to_generate_per_mask.
    for i in range(0, all_masked_sequences.shape[0], num_to_generate_per_mask):
        masked_sequences = all_masked_sequences[i:i+num_to_generate_per_mask]
        unmasked_sequences = all_unmasked_sequences[i:i+num_to_generate_per_mask]

        # Single-chain input: every position shares one sequence id.
        sequence_id = torch.ones((num_to_generate_per_mask, sequence_length), device=device).long()

        # Structure track is fully masked (4096 is the structure mask id;
        # 4098/4097 are the structure BOS/EOS ids).
        structure_tokens = torch.ones((num_to_generate_per_mask, sequence_length), device=device).long() * 4096
        structure_tokens[:, 0] = 4098
        structure_tokens[:, -1] = 4097

        # Infinite coordinates mark the backbone as unknown.
        coords = torch.inf * torch.ones((num_to_generate_per_mask, sequence_length, 3, 3), device=device)

        average_plddt = torch.ones((num_to_generate_per_mask), device=device)
        per_res_plddt = torch.zeros((num_to_generate_per_mask, sequence_length), device=device)

        # Remaining conditioning tracks are left empty.
        ss8_tokens = torch.zeros((num_to_generate_per_mask, sequence_length), device=device).long()
        sasa_tokens = torch.zeros((num_to_generate_per_mask, sequence_length), device=device).long()
        function_tokens = torch.zeros((num_to_generate_per_mask, sequence_length, 8), device=device).long()
        residue_annotation_tokens = torch.zeros((num_to_generate_per_mask, sequence_length, 16), device=device).long()

        # 1.0 wherever the template carries a mask token; only those positions are scored.
        masked_indices = (masked_sequences == mask_token_sequence).float()
        
        with torch.no_grad():
            # Forward pass conditioned only on the (partially masked) sequence track.
            logits = esm_model.nn(sequence_tokens=masked_sequences,
                                  structure_tokens=structure_tokens,
                                  average_plddt=average_plddt,
                                  per_res_plddt=per_res_plddt,
                                  ss8_tokens=ss8_tokens,
                                  sasa_tokens=sasa_tokens,
                                  function_tokens=function_tokens,
                                  residue_annotation_tokens=residue_annotation_tokens,
                                  sequence_id=sequence_id,
                                  bb_coords=coords)["sequence_logits"].detach()
            # Log-probability of each true residue, summed over the masked positions only.
            logps = torch.nn.functional.log_softmax(logits / sampling_temperature, dim=-1)
            logps = torch.gather(logps, dim=-1, index=unmasked_sequences.unsqueeze(-1)).squeeze(-1)
            logps = (logps * masked_indices).sum(-1)

        all_logps.append(logps)

    all_logps = torch.cat(all_logps).view(-1)
    
    print(all_logps.shape)


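    # Persist the samples and their scores for the next alignment step.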
    to_save = {"parent_sequence": parent_sequence,
                "all_masked_sequences": all_masked_sequences,
                "all_unmasked_sequences": all_unmasked_sequences,
                "all_unmasked_sequences_decoded": all_unmasked_sequences_decoded,
                "all_logps": all_logps}
    torch.save(to_save, f"{save_folder_name}/trpb_post_rd_{alignment_round}_{replicate}.pt")