# ESM2_embeddings_Human_Mouse / ESM2_script.py
# Generates ESM-2 per-protein embeddings (mean-pooled residue representations)
# for human and mouse Swiss-Prot sequences.
import torch
import esm
import numpy as np
import pandas as pd
from Bio import SeqIO, SwissProt
from tqdm import tqdm
import pickle
from io import StringIO
import requests
import time
# --- Model loading -------------------------------------------------------
# The original script had every loading line commented out, so `model`,
# `alphabet` and `batch_converter` (all used below) were undefined.
# The 15B checkpoint matches the layer-33 extraction below; swap in
# esm2_t6_8M_UR50D() for a quick smoke test (the layer logic adapts).
# To load from a local checkpoint file instead of downloading:
#   model_data = torch.load("esm2_t48_15B_UR50D.pt", weights_only=False)
#   model, alphabet = esm.pretrained.load_model_and_alphabet_core(
#       "esm2_t48_15B_UR50D.pt", model_data=model_data)
model, alphabet = esm.pretrained.esm2_t48_15B_UR50D()
batch_converter = alphabet.get_batch_converter()
model.eval()  # inference mode: disable dropout for deterministic embeddings
device = torch.device("cpu")
model = model.to(device)
# --- Load or build the parsed Swiss-Prot table ---------------------------
# Fast path: reuse a previously generated TSV cache. Slow path: parse the
# raw uniprot_sprot.dat flat file and write the cache for next time.
try:
    df = pd.read_csv(
        'parsed.tsv.gz',
        compression='gzip',
        sep='\t'
    )
    print('Read data from disk.')
except (FileNotFoundError, OSError):
    # Narrowed from a bare `except:` — only "cache missing/unreadable"
    # should trigger regeneration, not arbitrary errors.
    print('Generating data...')
    records = []
    # `with` ensures the flat-file handle is closed (the original leaked it).
    with open("uniprot_sprot.dat") as handle:
        for record in tqdm(SwissProt.parse(handle)):
            go_terms = [xref[1] for xref in record.cross_references if xref[0] == "GO"]
            # Proteins with no annotated function get a sentinel so they can
            # be counted (and later filtered) instead of being dropped here.
            if len(go_terms) < 1:
                go_terms = ['No_annotation']
            function_comments = [
                c for c in record.comments if c.startswith("-!- FUNCTION:")
            ]
            records.append({
                "Entry": record.entry_name,
                "Accession": record.accessions[0],
                "Protein Name": record.description,
                "Gene Name": record.gene_name,
                "Organism": record.organism,
                "Sequence": record.sequence,
                "Length": len(record.sequence),
                "GO": "; ".join(go_terms),
                # Reuse the list computed above instead of re-running the
                # identical comprehension (the original recomputed it).
                "Function": "\n".join(function_comments),
            })
    df = pd.DataFrame(records)
    nones = df["GO"].str.contains("No_annotation").sum()
    print('Sequences with no GO annotation: %d' % nones)
    df.to_csv(
        "parsed.tsv.gz",
        compression='gzip',
        sep='\t',
        index=False
    )
# Species whose proteins are kept; commented entries can be re-enabled.
kept = [
    "Homo sapiens (Human).",
    #"Escherichia coli.",
    #"Rattus norvegicus (Rat).",
    "Mus musculus (Mouse).",
    #"Severe acute respiratory syndrome coronavirus (SARS-CoV).",
    #"Severe acute respiratory syndrome coronavirus 2 (2019-nCoV) (SARS-CoV-2).",
    #"Saccharomyces cerevisiae (Baker's yeast).",
    #"Arabidopsis thaliana (Mouse-ear cress).",
    #"Mycobacterium tuberculosis."
]
# Keep only rows that both carry a GO value and belong to a species of
# interest, then cache the selection for downstream metadata lookups.
keep_mask = df['GO'].notna() & df['Organism'].isin(kept)
df = df[keep_mask]
df.to_csv('Kept.csv.gz', compression='gzip', index=False)
# Persist (id, sequence) pairs so embeddings can be matched back to rows.
ids = df["Entry"].tolist()
sequences = df["Sequence"].tolist()
with open("metadata.pkl", "wb") as fh:
    pickle.dump({"ids": ids, "sequences": sequences}, fh)
data = list(zip(ids, sequences))
# --- Embedding generation ------------------------------------------------
# Representation layer: deep checkpoints use layer 33 (the conventional
# ESM-2 embedding layer); the tiny 6-layer test model uses its last layer.
if model.num_layers > 6:
    layer = 33
else:
    layer = 6

embeddings = []
# ROW NUMBER: set this to resume a previously interrupted run
# (the "_incomplete_<start>_to_<end>" filename suffix tells you where).
history = 0
start = history
data = data[history:]
print('REMAINING ENTRIES:', len(data))
print("Generating embeddings...")
print("Starting on row %d" % history)
print("Using layer %d of %d" % (layer, model.num_layers))
try:
    for i in tqdm(range(len(data))):
        batch_labels, batch_strs, batch_tokens = batch_converter([data[i]])
        batch_tokens = batch_tokens.to(device)
        with torch.no_grad():
            results = model(batch_tokens, repr_layers=[layer])
        token_representations = results["representations"][layer]
        tokens = batch_tokens[0]
        # Count real (non-padding) tokens; slice off BOS/EOS before pooling.
        sequence_length = (tokens != alphabet.padding_idx).nonzero().size(0)
        residue_embeddings = token_representations[0, 1:sequence_length - 1]
        # Mean pooling over residues -> one fixed-size vector per protein.
        emb = residue_embeddings.mean(dim=0).cpu().numpy()
        embeddings.append(emb)
        history += 1
except (Exception, KeyboardInterrupt):
    # Interrupted (Ctrl-C or an error mid-run): save what was computed so
    # the run can resume by setting `history` to the last saved row + 1.
    # Narrowed from a bare `except:`, and guarded: np.stack raises on an
    # empty list, which previously masked the save when the very first
    # sequence failed.
    if embeddings:
        X = np.stack(embeddings)
        np.save("X_embeddings_incomplete_%d_to_%d.npy" % (start, history - 1), X)
        with open('Data_incomplete_%d_to_%d.pkl' % (start, history - 1), 'wb') as f:
            pickle.dump(data, f)
    # `raise SystemExit` instead of exit(): exit() depends on the `site`
    # module and is not guaranteed outside the interactive interpreter.
    raise SystemExit
X = np.stack(embeddings)
# Row range is inclusive (start..history-1), matching the naming convention
# of the resume files above (the original was off by one here).
np.save("X_embeddings_incomplete_%d_to_%d.npy" % (start, history - 1), X)
# Use np.concatenate([part1, part2, ...]) to obtain the whole thing.
# Kept.csv.gz should have the corresponding metadata to all of those embeddings.
# Check the row numbers saved in the file names to parse the metadata in case of doubt.