"""Compute the log-likelihood of previously sampled sequences under an aligned
bidirectional ESM model, scoring one dataset per alignment round."""

import argparse
import os
import re

import pandas as pd
import torch
from Bio.Seq import Seq
from esm.tokenization.sequence_tokenizer import EsmSequenceTokenizer
from omegaconf import OmegaConf

from pera.nn import BidirectionalModel

device = torch.device("cuda:0")
sequence_tokenizer = EsmSequenceTokenizer()

# Set up the argument parser.
parser = argparse.ArgumentParser(description="Calculate the log-likelihood of sampled sequences")
parser.add_argument('--target', type=str, required=True, help='Dataset as a string')
parser.add_argument('--num_samples', type=int, required=False, default=384, help='Number of samples to process (default: 384)')
parser.add_argument('--alignment_round', type=int, required=False, default=1, help='Alignment round as an integer')
parser.add_argument('--version_number', type=str, required=False, default="1", help='Version number as a string')
parser.add_argument('--replicate', type=int, required=False, default=1, help='Replicate number as an integer')
args = parser.parse_args()

target = args.target
alignment_round = args.alignment_round
version_number = args.version_number
num_samples = args.num_samples
replicate = args.replicate

# Score the base-model samples plus the samples from every completed alignment round.
datasets = [f"{target}/base_model_{num_samples}"]
for i in range(alignment_round):
    datasets.append(f"{target}/aligned_{i}_{num_samples}_{replicate}")

data_root_path = "/scratch/groups/rotskoff/sebastian/era/protein_era/data"

# Load the training config and the best checkpoint from the requested round.
cfg_filename = f"{target}/lightning_logs_round_{alignment_round}/{version_number}/config.yaml"
network_filename = f"{target}/lightning_logs_round_{alignment_round}/{version_number}/checkpoints/best_model.ckpt"
cfg = OmegaConf.load(cfg_filename)
# sampling_temperature = cfg["train"]["lightning_model_args"]["sampling_temperature"]
sampling_temperature = 1  # override whatever temperature the config was trained with
OmegaConf.update(cfg, "train.lightning_model_args.sampling_temperature", sampling_temperature)

esm_model = BidirectionalModel(
    cfg["nn"]["model"], cfg["nn"]["model_args"], **cfg["train"]["lightning_model_args"]
).to(device)
esm_model.load_model_from_ckpt(network_filename)
esm_model.eval()

# Special-token ids of the residue vocabulary.
mask_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["mask"]
bos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["bos"]
eos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["eos"]
pad_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["pad"]

for data in datasets:
    save_folder_name = data
    # Reduce e.g. "GB1/base_model_384" to the dataset base name "GB1".
    data = data.split("/")[0]
    data = data.split("_")[0]
    os.makedirs(save_folder_name, exist_ok=True)

    # Load the fitness table and the parent (wild-type) sequence for this dataset.
    if not data.startswith("TrpB") and not data.startswith("DHFR"):
        df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
        with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
            parent_sequence_decoded = file.readlines()[1].strip()
    elif data.startswith("DHFR"):
        print("Loading DHFR data...")
        df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
        with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
            nucleotide_seq = file.readlines()[1].strip()
        # The DHFR FASTA stores nucleotides; translate to an amino-acid sequence.
        nucleotide_seq = Seq(nucleotide_seq)
        parent_sequence_decoded = str(nucleotide_seq.translate())
    else:
        df = pd.read_csv(f"{data_root_path}/TrpB/scale2max/{data}.csv")
        with open(f"{data_root_path}/TrpB/TrpB.fasta", "r") as file:
            parent_sequence_decoded = file.readlines()[1].strip()

    # Pull a representative mutation string and extract its residue positions.
    if data != "GB1":
        muts = df["muts"].iloc[0]
    else:
        muts = df["muts"].iloc[100000]
    numbers = re.findall(r'\d+', muts)
    mask_indices = list(map(int, numbers))

    # Samples are stored in groups of 4 completions per mask pattern.
    num_masks_per_sequence = num_samples // 4
    num_to_generate_per_mask = 4

    parent_sequence = torch.tensor(
        sequence_tokenizer.encode(parent_sequence_decoded, add_special_tokens=True),
        device=device,
    ).unsqueeze(0).long()
    sequence_length = parent_sequence.shape[1]
    print(sequence_length, parent_sequence.shape, parent_sequence_decoded)
    print(save_folder_name)

    # Load the previously sampled masked/unmasked sequence pairs.
    trpb = torch.load(f"./{save_folder_name}/trpb_{replicate}.pt")
    all_unmasked_sequences_decoded = trpb["all_unmasked_sequences_decoded"]
    all_unmasked_sequences = trpb["all_unmasked_sequences"]
    all_masked_sequences = trpb["all_masked_sequences"]
    all_unmasked_sequences = all_unmasked_sequences.reshape(-1, all_unmasked_sequences.shape[-1])

    all_logps = []
    print(all_masked_sequences.shape)
    for i in range(0, all_masked_sequences.shape[0], num_to_generate_per_mask):
        masked_sequences = all_masked_sequences[i:i + num_to_generate_per_mask]
        unmasked_sequences = all_unmasked_sequences[i:i + num_to_generate_per_mask]

        # Sequence-only scoring: the structure, pLDDT, SS8, SASA, function, and
        # residue-annotation tracks are filled with their default/null tokens.
        sequence_id = torch.ones((num_to_generate_per_mask, sequence_length), device=device).long()
        structure_tokens = torch.ones((num_to_generate_per_mask, sequence_length), device=device).long() * 4096
        structure_tokens[:, 0] = 4098
        structure_tokens[:, -1] = 4097
        coords = torch.inf * torch.ones((num_to_generate_per_mask, sequence_length, 3, 3), device=device)
        average_plddt = torch.ones((num_to_generate_per_mask), device=device)
        per_res_plddt = torch.zeros((num_to_generate_per_mask, sequence_length), device=device)
        ss8_tokens = torch.zeros((num_to_generate_per_mask, sequence_length), device=device).long()
        sasa_tokens = torch.zeros((num_to_generate_per_mask, sequence_length), device=device).long()
        function_tokens = torch.zeros((num_to_generate_per_mask, sequence_length, 8), device=device).long()
        residue_annotation_tokens = torch.zeros((num_to_generate_per_mask, sequence_length, 16), device=device).long()

        # 1.0 at positions that were masked, 0.0 elsewhere.
        masked_indices = (masked_sequences == mask_token_sequence).float()

        with torch.no_grad():
            logits = esm_model.nn(
                sequence_tokens=masked_sequences,
                structure_tokens=structure_tokens,
                average_plddt=average_plddt,
                per_res_plddt=per_res_plddt,
                ss8_tokens=ss8_tokens,
                sasa_tokens=sasa_tokens,
                function_tokens=function_tokens,
                residue_annotation_tokens=residue_annotation_tokens,
                sequence_id=sequence_id,
                bb_coords=coords,
            )["sequence_logits"].detach()

        # Log-probability of each unmasked token, summed over the masked positions only.
        logps = torch.nn.functional.log_softmax(logits / sampling_temperature, dim=-1)
        logps = torch.gather(logps, dim=-1, index=unmasked_sequences.unsqueeze(-1)).squeeze(-1)
        logps = (logps * masked_indices).sum(-1).detach()
        all_logps.append(logps)

    all_logps = torch.cat(all_logps).view(-1)
    print(all_logps.shape)

    to_save = {
        "parent_sequence": parent_sequence,
        "all_masked_sequences": all_masked_sequences,
        "all_unmasked_sequences": all_unmasked_sequences,
        "all_unmasked_sequences_decoded": all_unmasked_sequences_decoded,
        "all_logps": all_logps,
    }
    torch.save(to_save, f"{save_folder_name}/trpb_post_rd_{alignment_round}_{replicate}.pt")