import os
import re
import argparse

import torch
import numpy as np
import pandas as pd
import h5py
import lightning as L
from omegaconf import OmegaConf

from pera.nn import (
    BidirectionalModel,
    sample_components_from_bidirectional_transformer,
    sample_perturbations,
    sample_embedding_perturbations,
)
from esm.tokenization.sequence_tokenizer import EsmSequenceTokenizer
from Bio.Seq import Seq
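
# Assumption: the script targets a single-GPU node; fall back to CPU if CUDA is unavailable.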
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

sequence_tokenizer = EsmSequenceTokenizer()
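
# Command-line arguments selecting the target dataset, alignment round, model
# version, and replicate. Example invocation (script name and target are illustrative):
#   python sample_aligned_model.py --target TrpB --alignment_round 1 --replicate 1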
parser = argparse.ArgumentParser(description="Sample sequences from an aligned model and compute their log-likelihoods")
parser.add_argument('--target', type=str, required=True, help='Target dataset as a string')
parser.add_argument('--num_samples', type=int, required=False, default=384, help='Number of unique sequences to generate (default: 384)')
parser.add_argument('--alignment_round', type=int, required=False, default=1, help='Alignment round as an integer')
parser.add_argument('--version_number', type=str, required=False, default='1', help='Lightning log version number as a string')
parser.add_argument('--replicate', type=int, required=False, default=1, help='Replicate number as an integer')
args = parser.parse_args()

target = args.target
alignment_round = args.alignment_round
version_number = args.version_number
num_samples = args.num_samples
replicate = args.replicate
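
# Locate the config and best checkpoint for this alignment round, and the
# folder where this generation's samples will be written.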
cfg_filename = f"{target}/lightning_logs_round_{alignment_round}/{version_number}/config.yaml"
network_filename = f"{target}/lightning_logs_round_{alignment_round}/{version_number}/checkpoints/best_model.ckpt"
save_folder_name = f"{target}/aligned_{alignment_round}_{num_samples}_{replicate}"
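
# Load the training config; the sampling temperature set here is also used
# when scoring the samples below.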
cfg = OmegaConf.load(cfg_filename)
sampling_temperature = 1.0
OmegaConf.update(cfg, "train.lightning_model_args.sampling_temperature", sampling_temperature)
OmegaConf.update(cfg, "train.lightning_model_args.better_energy", "lower")
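
# Rebuild the bidirectional model from the config and load the best checkpoint
# in evaluation mode.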
esm_model = BidirectionalModel(cfg["nn"]["model"],
                               cfg["nn"]["model_args"],
                               **cfg["train"]["lightning_model_args"]).to(device)
esm_model.load_model_from_ckpt(network_filename)
esm_model.eval()
print(f"Loaded checkpoint from {network_filename}")
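
# Special-token ids for the sequence track, read from the model config.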
mask_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["mask"]
bos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["bos"]
eos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["eos"]
pad_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["pad"]

os.makedirs(save_folder_name, exist_ok=True)
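
# Folders holding all previously generated sequences: the base-model generation
# plus every earlier aligned round. These are used to enforce novelty below.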
past_generations = [f"{target}/base_model_{num_samples}"]
for i in range(alignment_round):
    past_generations.append(f"{target}/aligned_{i}_{num_samples}_{replicate}")

previous_unmasked_sequences_decoded = []
for generation_folder in past_generations:
    generation = torch.load(f"{generation_folder}/trpb_{replicate}.pt")
    previous_unmasked_sequences_decoded.extend(generation['all_unmasked_sequences_decoded'])

assert len(previous_unmasked_sequences_decoded) == len(set(previous_unmasked_sequences_decoded)), "There are duplicate sequences in previous_unmasked_sequences_decoded"
print("All elements in previous_unmasked_sequences_decoded are unique.")
data = target
data_root_path = "/scratch/groups/rotskoff/sebastian/era/protein_era/data"

if data.startswith("TrpB"):
    df = pd.read_csv(f"{data_root_path}/TrpB/scale2max/{data}.csv")
    with open(f"{data_root_path}/TrpB/TrpB.fasta", "r") as file:
        parent_sequence_decoded = file.readlines()[1].strip()

elif data == "DHFR":
    df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
    with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
        nucleotide_seq = file.readlines()[1].strip()
    nucleotide_seq = Seq(nucleotide_seq)
    parent_sequence_decoded = str(nucleotide_seq.translate())

else:
    df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
    with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
        parent_sequence_decoded = file.readlines()[1].strip()
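
# Read a representative mutation string to recover the positions that were
# mutated in the dataset; these are the positions that will be masked and
# redesigned.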
if data != "GB1":
    muts = df["muts"].iloc[0]
else:
    muts = df["muts"].iloc[100000]
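
# Extract the integer residue positions from the mutation string (its exact
# format is dataset-specific). Because the encoded parent carries a BOS token
# at index 0, the masking below treats these as 1-indexed residue positions,
# which then index the token tensor directly.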
numbers = re.findall(r'\d+', muts)
mask_indices = list(map(int, numbers))

num_masks_per_sequence = num_samples // 4  # note: not used below
num_to_generate_per_mask = 4
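
# Encode the parent sequence once (with BOS/EOS); every masked variant below is
# a copy of this tensor.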
parent_sequence = torch.tensor(sequence_tokenizer.encode(parent_sequence_decoded,
                                                         add_special_tokens=True), device=device).unsqueeze(0).long()
sequence_length = parent_sequence.shape[1]

all_masked_sequences = []
all_unmasked_sequences_decoded = []
all_unmasked_sequences = []
all_logps = []
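
# Sample in batches of num_to_generate_per_mask until num_samples unique
# sequences, unseen in any earlier round, have been collected.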
while len(all_unmasked_sequences_decoded) < num_samples:

    print(f"collected {len(all_unmasked_sequences_decoded)} / {num_samples} unique sequences")

    masked_sequences = parent_sequence.clone().repeat(num_to_generate_per_mask, 1)
    masked_sequences[:, mask_indices] = mask_token_sequence
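
    # Auxiliary ESM3-style tracks are set to "empty" defaults so only the
    # sequence track is informative: structure tokens are all mask (4096) with
    # BOS (4098) / EOS (4097) at the termini, backbone coordinates are infinite
    # (unknown), and the pLDDT/SS8/SASA/function/annotation tracks are trivial.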
    sequence_id = torch.ones((num_to_generate_per_mask, sequence_length), device=device, dtype=torch.long)

    structure_tokens = torch.full((num_to_generate_per_mask, sequence_length), 4096, device=device, dtype=torch.long)
    structure_tokens[:, 0] = 4098
    structure_tokens[:, -1] = 4097

    coords = torch.inf * torch.ones((num_to_generate_per_mask, sequence_length, 3, 3), device=device)

    average_plddt = torch.ones((num_to_generate_per_mask,), device=device)
    per_res_plddt = torch.zeros((num_to_generate_per_mask, sequence_length), device=device)
    ss8_tokens = torch.zeros((num_to_generate_per_mask, sequence_length), device=device, dtype=torch.long)
    sasa_tokens = torch.zeros((num_to_generate_per_mask, sequence_length), device=device, dtype=torch.long)

    function_tokens = torch.zeros((num_to_generate_per_mask, sequence_length, 8), device=device, dtype=torch.long)
    residue_annotation_tokens = torch.zeros((num_to_generate_per_mask, sequence_length, 16), device=device, dtype=torch.long)
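
    # Iteratively unmask the masked positions with the aligned model.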
    with torch.no_grad():
        unmasked_sequences = sample_components_from_bidirectional_transformer(
            transformer_model=esm_model,
            masked_sequence_tokens=masked_sequences,
            structure_tokens=structure_tokens,
            average_plddt=average_plddt,
            per_res_plddt=per_res_plddt,
            ss8_tokens=ss8_tokens,
            sasa_tokens=sasa_tokens,
            function_tokens=function_tokens,
            residue_annotation_tokens=residue_annotation_tokens,
            bb_coords=coords,
            sequence_id=sequence_id,
            mask_token_sequence=mask_token_sequence,
            bos_token_sequence=bos_token_sequence,
            eos_token_sequence=eos_token_sequence,
            pad_token_sequence=pad_token_sequence,
            inference_batch_size=1,
        )
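
    # Score each sample with a single forward pass on the masked input: gather
    # the log-probability of the sampled residue at every masked position and
    # sum. Note this one-shot score is not, in general, the likelihood of the
    # iterative decoding path that generated the sample.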
    masked_indices = (masked_sequences == mask_token_sequence).float()
    logits = esm_model.nn(sequence_tokens=masked_sequences,
                          structure_tokens=structure_tokens,
                          average_plddt=average_plddt,
                          per_res_plddt=per_res_plddt,
                          ss8_tokens=ss8_tokens,
                          sasa_tokens=sasa_tokens,
                          function_tokens=function_tokens,
                          residue_annotation_tokens=residue_annotation_tokens,
                          sequence_id=sequence_id,
                          bb_coords=coords)["sequence_logits"].detach()
    logps = torch.nn.functional.log_softmax(logits / sampling_temperature, dim=-1)
    logps = torch.gather(logps, dim=-1, index=unmasked_sequences.unsqueeze(-1)).squeeze(-1)
    logps = (logps * masked_indices).sum(-1).detach()
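
    # Decode (dropping BOS/EOS and tokenizer spaces) and keep only sequences
    # not seen in this run or in any previous generation.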
    decoded_seqs = [sequence.replace(" ", "") for sequence in sequence_tokenizer.batch_decode(unmasked_sequences[:, 1:-1])]
    for seq, logp, masked_seq, unmasked_seq in zip(decoded_seqs, logps, masked_sequences, unmasked_sequences):
        if seq in all_unmasked_sequences_decoded or seq in previous_unmasked_sequences_decoded:
            continue
        all_unmasked_sequences_decoded.append(seq)
        all_logps.append(logp)
        all_masked_sequences.append(masked_seq)
        all_unmasked_sequences.append(unmasked_seq)
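
# The final batch may overshoot num_samples, so trim all parallel lists.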
all_unmasked_sequences_decoded = all_unmasked_sequences_decoded[:num_samples]
all_masked_sequences = all_masked_sequences[:num_samples]
all_unmasked_sequences = all_unmasked_sequences[:num_samples]
all_logps = all_logps[:num_samples]

all_masked_sequences = torch.stack(all_masked_sequences, dim=0)
all_unmasked_sequences = torch.stack(all_unmasked_sequences, dim=0)
all_logps = torch.stack(all_logps, dim=0)
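
# Save this generation in the layout the next alignment round expects when it
# rebuilds previous_unmasked_sequences_decoded.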
to_save = {"parent_sequence": parent_sequence,
           "all_masked_sequences": all_masked_sequences,
           "all_unmasked_sequences": all_unmasked_sequences,
           "all_unmasked_sequences_decoded": all_unmasked_sequences_decoded,
           "all_logps": all_logps}
torch.save(to_save, f"{save_folder_name}/trpb_{replicate}.pt")