| | import torch |
| | import esm |
| | import numpy as np |
| | import pandas as pd |
| | from Bio import SeqIO, SwissProt |
| | from tqdm import tqdm |
| | import pickle |
| | from io import StringIO |
| | import requests |
| | import time |
| |
|
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
# Inference-only setup: run the (externally loaded) model on CPU with
# dropout and other training-time behavior disabled.
device = torch.device("cpu")
model = model.to(device)
model.eval()
| |
|
# Load the parsed SwissProt table from the on-disk cache if present;
# otherwise parse uniprot_sprot.dat from scratch and write the cache.
try:
    df = pd.read_csv(
        'parsed.tsv.gz',
        compression='gzip',
        sep='\t',
    )
    print('Read data from disk.')
# OSError covers both a missing cache file (FileNotFoundError) and a
# corrupt/truncated gzip archive (BadGzipFile); a bare except would also
# swallow KeyboardInterrupt and genuine programming errors.
except OSError:
    print('Generating data...')
    records = []
    # `with` ensures the flat file is closed even if parsing raises.
    with open("uniprot_sprot.dat") as handle:
        for record in tqdm(SwissProt.parse(handle)):
            # Gene Ontology identifiers are the cross-references tagged "GO".
            go_terms = [xref[1] for xref in record.cross_references if xref[0] == "GO"]

            # Keep a sentinel so every row has a non-empty GO field.
            if not go_terms:
                go_terms = ['No_annotation']

            # FUNCTION comment blocks hold the free-text functional description.
            function_comments = [
                c for c in record.comments if c.startswith("-!- FUNCTION:")
            ]

            records.append({
                "Entry": record.entry_name,
                "Accession": record.accessions[0],
                "Protein Name": record.description,
                "Gene Name": record.gene_name,
                "Organism": record.organism,
                "Sequence": record.sequence,
                "Length": len(record.sequence),
                "GO": "; ".join(go_terms),
                # Reuse the list computed above instead of re-scanning comments.
                "Function": "\n".join(function_comments),
            })

    df = pd.DataFrame(records)

    nones = df["GO"].str.contains("No_annotation").sum()
    print('Sequences with no GO annotation: %d' % nones)

    # Cache the parsed table so subsequent runs skip the slow parse.
    df.to_csv(
        "parsed.tsv.gz",
        compression='gzip',
        sep='\t',
        index=False,
    )
| |
|
| | |
# Drop rows with a missing GO value, then restrict the table to the
# organisms of interest before saving the filtered set.
df = df.dropna(subset=['GO'])

target_organisms = [
    "Homo sapiens (Human).",
    "Mus musculus (Mouse).",
]

df = df[df['Organism'].isin(target_organisms)]
df.to_csv('Kept.csv.gz', compression='gzip', index=False)
| |
|
# Pull out the identifiers and raw sequences, and checkpoint them so a
# later run can recover them without re-reading the DataFrame.
ids = df["Entry"].tolist()
sequences = df["Sequence"].tolist()

metadata = {"ids": ids, "sequences": sequences}
with open("metadata.pkl", "wb") as f:
    pickle.dump(metadata, f)

# (label, sequence) pairs in the shape consumed by batch_converter below.
data = list(zip(ids, sequences))
| |
|
# Extract representations from the model's final layer. The original
# hard-coded layer 33 for any model deeper than 6 layers, which is only
# valid for 33-layer checkpoints and would KeyError at lookup time for
# intermediate depths (e.g. 12- or 30-layer ESM2). Using num_layers is
# identical for the 33- and 6-layer cases and correct for every size.
layer = model.num_layers
| |
|
# Generate one mean-pooled per-protein embedding per sequence. On any
# failure mid-run, checkpoint the embeddings computed so far plus the
# remaining data so the run can be resumed from `history`.
try:
    embeddings = []

    # Resume offset: set to the last checkpointed row index to continue a
    # previously interrupted run (0 = start from scratch).
    history = 0

    start = history
    data = data[history:]
    print('REMAINING ENTRIES:', len(data))

    print("Generating embeddings...")
    print("Starting on row %d" % history)
    print("Using layer %d of %d" % (layer, model.num_layers))

    for i in tqdm(range(len(data))):
        # Batch of one: (label, sequence) pair -> token tensor.
        batch_labels, batch_strs, batch_tokens = batch_converter([data[i]])
        batch_tokens = batch_tokens.to(device)

        with torch.no_grad():
            results = model(batch_tokens, repr_layers=[layer])
        token_representations = results["representations"][layer]

        tokens = batch_tokens[0]

        # Count real tokens; with a batch of one there is no padding, but
        # this keeps the length computation safe either way.
        sequence_length = (tokens != alphabet.padding_idx).nonzero().size(0)
        # Strip the BOS/EOS positions, then mean-pool the residue vectors
        # into a single fixed-size embedding.
        residue_embeddings = token_representations[0, 1:sequence_length - 1]
        emb = residue_embeddings.mean(dim=0).cpu().numpy()

        embeddings.append(emb)
        history += 1

# Exception (not a bare except) so Ctrl-C / SystemExit still abort
# immediately instead of writing a misleading checkpoint; the original
# also discarded the error itself, making failures undiagnosable.
except Exception as err:
    print('Embedding generation failed on row %d: %r' % (history, err))
    # Rows start..history-1 completed; row `history` is the one that failed.
    if embeddings:  # np.stack raises on an empty list
        X = np.stack(embeddings)
        np.save("X_embeddings_incomplete_%d_to_%d.npy" % (start, history - 1), X)
    with open('Data_incomplete_%d_to_%d.pkl' % (start, history - 1), 'wb') as f:
        pickle.dump(data, f)
    # Non-zero exit code so wrapping scripts can detect the failure.
    raise SystemExit(1)

X = np.stack(embeddings)
np.save("X_embeddings_incomplete_%d_to_%d.npy" % (start, history), X)
| |
|
| | |
| | |
| | |
| |
|