import os
import re
import argparse

import torch
import numpy as np
import pandas as pd
import h5py

from omegaconf import OmegaConf
import lightning as L

from pera.nn import BidirectionalModel, sample_components_from_bidirectional_transformer, sample_perturbations, sample_embedding_perturbations
from esm.tokenization.sequence_tokenizer import EsmSequenceTokenizer
from Bio.Seq import Seq
from Bio.PDB import PDBList, PDBParser, is_aa

device = torch.device("cuda:0")
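
# PDB three-letter residue names to one-letter codes, including the
# nonstandard/ambiguous codes SEC, PYL, ASX, GLX, XLE, and UNK.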
three_to_one = {
    'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D',
    'CYS': 'C', 'GLN': 'Q', 'GLU': 'E', 'GLY': 'G',
    'HIS': 'H', 'ILE': 'I', 'LEU': 'L', 'LYS': 'K',
    'MET': 'M', 'PHE': 'F', 'PRO': 'P', 'SER': 'S',
    'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V',
    'SEC': 'U', 'PYL': 'O', 'ASX': 'B', 'GLX': 'Z',
    'XLE': 'J', 'UNK': 'X'
}

def get_backbone_coords_from_local_pdb(pdb_path, chain_id='A', sequence_length=None, target="data", device=device):
    """
    Load backbone (N, CA, C) coordinates and residue types from a local PDB file.

    BOS/EOS positions are padded with inf coordinates, and a few targets get
    extra inf padding so the structure aligns with their tokenized sequences.

    Returns:
        coords_tensor: torch.Tensor of shape (1, N, 3, 3)
        residue_types: list of one-letter residue codes
    """
    parser = PDBParser(QUIET=True)
    structure = parser.get_structure("local_structure", pdb_path)

    coords = []
    residue_types = []
    model = structure[0]

    if chain_id not in model:
        raise ValueError(f"Chain {chain_id} not found in {pdb_path}")

    chain = model[chain_id]

    for residue in chain:
        if sequence_length is not None and len(coords) >= sequence_length:
            break
        if not is_aa(residue):
            continue
        try:
            n = residue['N'].get_coord()
            ca = residue['CA'].get_coord()
            c = residue['C'].get_coord()
            coords.append([n, ca, c])
            resname = residue.get_resname().upper()
            residue_types.append(three_to_one.get(resname, 'X'))
        except KeyError:
            # Skip residues with missing backbone atoms.
            continue

    if not coords:
        raise ValueError("No residues with complete backbone atoms found.")

    # Pad the BOS and EOS positions with inf coordinates.
    pad = [[float('inf')] * 3, [float('inf')] * 3, [float('inf')] * 3]
    coords.insert(0, pad)
    coords.append(pad)

    # Target-specific padding so the structure lines up with the tokenized sequence.
    if target == "ParD2":
        coords = [pad, pad] + coords + [pad, pad]
    elif target == "ParD3":
        coords = [pad] * 2 + coords + [pad] * 6
    elif target == "TrpB4":
        coords = [pad] + coords

    coords_tensor = torch.tensor(coords, device=device).unsqueeze(0)

    return coords_tensor, residue_types

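# Command-line interface: which dataset to score and which alignment
# round / replicate / checkpoint version to load.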
parser = argparse.ArgumentParser(description="Calculating the log-likelihood of a sequence")
parser.add_argument('--target', type=str, required=True, help='Dataset as a string')
parser.add_argument('--num_samples', type=int, default=384, help='Number of samples to process (default: 384)')
parser.add_argument('--alignment_round', type=int, default=1, help='Alignment round as an integer')
parser.add_argument('--version_number', type=str, default="1", help='Version number as a string')
parser.add_argument('--replicate', type=int, default=1, help='Replicate number as an integer')
args = parser.parse_args()

target = args.target
alignment_round = args.alignment_round
version_number = args.version_number
num_samples = args.num_samples
replicate = args.replicate

# Score the base-model samples plus the samples from each alignment round.
datasets = [f"{target}/base_model_{num_samples}"]
for i in range(alignment_round):
    datasets.append(f"{target}/aligned_{i}_{num_samples}_{replicate}")

data_root_path = "/global/cfs/projectdirs/m4235/sebastian/data"

sequence_tokenizer = EsmSequenceTokenizer()

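# Load the run config and the best checkpoint for this lightning version.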
cfg_filename = f"{target}/lightning_logs/{version_number}/config.yaml"
network_filename = f"{target}/lightning_logs/{version_number}/checkpoints/best_model.ckpt"

cfg = OmegaConf.load(cfg_filename)

sampling_temperature = 1
OmegaConf.update(cfg, "train.lightning_model_args.sampling_temperature", sampling_temperature)

esm_model = BidirectionalModel(cfg["nn"]["model"],
                               cfg["nn"]["model_args"],
                               **cfg["train"]["lightning_model_args"]).to(device)
esm_model.load_model_from_ckpt(network_filename)
esm_model.eval()

# Special-token ids for the sequence track.
mask_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["mask"]
bos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["bos"]
eos_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["eos"]
pad_token_sequence = cfg["nn"]["model_args"]["residue_token_info"]["pad"]

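# Score each dataset in turn: load its CSV and parent fasta, then the
# previously sampled masked/unmasked sequences.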
for data in datasets:
    save_folder_name = data
    # The prefix before "/" is the dataset name; the suffix encodes the sampling run.
    data = data.split("/")[0]

    os.makedirs(save_folder_name, exist_ok=True)
    if data.startswith("TrpB"):
        # All TrpB variants share one CSV root and reference fasta.
        df = pd.read_csv(f"{data_root_path}/TrpB/scale2max/{data}.csv")
        with open(f"{data_root_path}/TrpB/TrpB.fasta", "r") as file:
            parent_sequence_decoded = file.readlines()[1].strip()
    elif data.startswith("DHFR"):
        print("Loading DHFR data...")
        df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
        with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
            # The DHFR fasta holds a nucleotide sequence; translate it to protein.
            nucleotide_seq = Seq(file.readlines()[1].strip())
            parent_sequence_decoded = str(nucleotide_seq.translate())
    else:
        df = pd.read_csv(f"{data_root_path}/{data}/scale2max/{data}.csv")
        with open(f"{data_root_path}/{data}/{data}.fasta", "r") as file:
            parent_sequence_decoded = file.readlines()[1].strip()

    # Read the mutated positions from one representative mutant row.
    if data != "GB1":
        muts = df["muts"].iloc[0]
    else:
        muts = df["muts"].iloc[100000]

    numbers = re.findall(r'\d+', muts)
    mask_indices = list(map(int, numbers))
    # num_samples sequences were generated as 4 completions per mask pattern.
    num_masks_per_sequence = num_samples // 4
    num_to_generate_per_mask = 4

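    # Tokenize the parent sequence once; its tokenized length (with BOS/EOS)
    # sets the shape of every input track below.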
    parent_sequence = torch.tensor(sequence_tokenizer.encode(parent_sequence_decoded,
                                                             add_special_tokens=True),
                                   device=device).unsqueeze(0).long()
    sequence_length = parent_sequence.shape[1]
    print(sequence_length, parent_sequence.shape, parent_sequence_decoded)

    print(save_folder_name)

    # Masked/unmasked sequences produced by the sampling run for this replicate.
    trpb = torch.load(f"./{save_folder_name}/trpb_{replicate}.pt")
    all_unmasked_sequences_decoded = trpb["all_unmasked_sequences_decoded"]
    all_unmasked_sequences = trpb["all_unmasked_sequences"]
    all_masked_sequences = trpb["all_masked_sequences"]
    all_unmasked_sequences = all_unmasked_sequences.reshape(-1, all_unmasked_sequences.shape[-1])

    all_logps = []

    print(all_masked_sequences.shape)

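    # Score the samples in batches of num_to_generate_per_mask; each batch
    # shares a single mask pattern and conditions on the backbone structure.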
    for i in range(0, all_masked_sequences.shape[0], num_to_generate_per_mask):
        masked_sequences = all_masked_sequences[i:i + num_to_generate_per_mask]
        unmasked_sequences = all_unmasked_sequences[i:i + num_to_generate_per_mask]

        # Single-chain input, so every position gets sequence id 1.
        sequence_id = torch.ones((num_to_generate_per_mask, sequence_length), device=device).long()

        # Structure track: 4096 fills the track (structure mask); the first
        # and last positions get the BOS/EOS ids 4098 and 4097.
        structure_tokens = torch.ones((num_to_generate_per_mask, sequence_length), device=device).long() * 4096
        structure_tokens[:, 0] = 4098
        structure_tokens[:, -1] = 4097

        # Backbone coordinates from the reference PDB (TrpB datasets share one file).
        if data.startswith("TrpB"):
            pdb_path = f"{data_root_path}/TrpB/TrpB.pdb"
        else:
            pdb_path = f"{data_root_path}/{data}/{data}.pdb"
        coords, residue_types = get_backbone_coords_from_local_pdb(pdb_path, chain_id='A',
                                                                   sequence_length=sequence_length - 2,
                                                                   target=data)

        # Drop the BOS/EOS pad rows before comparing against the parent sequence.
        coords_trimmed = coords[:, 1:-1]

        # A position is valid if none of its nine backbone coordinates are inf padding.
        valid_mask = ~(torch.isinf(coords_trimmed).view(-1, 9).any(dim=1))
        residues_to_compare = [r for r, valid in zip(parent_sequence_decoded, valid_mask) if valid]

        # Sanity check: the PDB residues should match the parent sequence.
        if residue_types != residues_to_compare:
            print("Residue mismatch detected!")
            for pos, (ref, pdb) in enumerate(zip(residues_to_compare, residue_types)):
                if ref != pdb:
                    print(f"Position {pos}: expected {ref}, got {pdb}")
        else:
            print("Residues match.")
        print(coords.shape)

        assert coords.shape[1] == sequence_length, \
            f"Coords length {coords.shape[1]} does not match sequence length {sequence_length}"

        # Tile the single structure across the batch.
        coords = coords.repeat(num_to_generate_per_mask, 1, 1, 1)

        # Placeholder confidence and annotation tracks; the model conditions
        # only on sequence and backbone structure here.
        average_plddt = torch.ones((num_to_generate_per_mask), device=device)
        per_res_plddt = torch.zeros((num_to_generate_per_mask, sequence_length), device=device)
        ss8_tokens = torch.zeros((num_to_generate_per_mask, sequence_length), device=device).long()
        sasa_tokens = torch.zeros((num_to_generate_per_mask, sequence_length), device=device).long()
        function_tokens = torch.zeros((num_to_generate_per_mask, sequence_length, 8), device=device).long()
        residue_annotation_tokens = torch.zeros((num_to_generate_per_mask, sequence_length, 16), device=device).long()

        # 1.0 at positions that were masked, 0.0 elsewhere.
        masked_indices = (masked_sequences == mask_token_sequence).float()

        with torch.no_grad():
            logits = esm_model.nn(sequence_tokens=masked_sequences,
                                  structure_tokens=structure_tokens,
                                  average_plddt=average_plddt,
                                  per_res_plddt=per_res_plddt,
                                  ss8_tokens=ss8_tokens,
                                  sasa_tokens=sasa_tokens,
                                  function_tokens=function_tokens,
                                  residue_annotation_tokens=residue_annotation_tokens,
                                  sequence_id=sequence_id,
                                  bb_coords=coords)["sequence_logits"].detach()
        # Per-position log-probabilities of the tokens that were actually sampled.
        logps = torch.nn.functional.log_softmax(logits / sampling_temperature, dim=-1)
        logps = torch.gather(logps, dim=-1, index=unmasked_sequences.unsqueeze(-1)).squeeze(-1)
        # Sum over the masked positions only.
        logps = (logps * masked_indices).sum(-1).detach()

        all_logps.append(logps)

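    # The summed log-probabilities over the masked sites give each sample's
    # conditional log-likelihood under the model (masked positions treated
    # independently given the unmasked context).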
    all_logps = torch.cat(all_logps).view(-1)

    print(all_logps.shape)

    to_save = {"parent_sequence": parent_sequence,
               "all_masked_sequences": all_masked_sequences,
               "all_unmasked_sequences": all_unmasked_sequences,
               "all_unmasked_sequences_decoded": all_unmasked_sequences_decoded,
               "all_logps": all_logps}
    torch.save(to_save, f"{save_folder_name}/trpb_post_rd_{alignment_round}_{replicate}.pt")
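
# Example invocation (script name illustrative):
#   python compute_loglik.py --target TrpB4 --num_samples 384 --alignment_round 1 --replicate 1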