File size: 4,726 Bytes
f76346b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
import torch
import esm
import numpy as np
import pandas as pd
from Bio import SeqIO, SwissProt
from tqdm import tqdm
import pickle
from io import StringIO
import requests
import time

# --- Model setup -------------------------------------------------------------
# Alternative: the large 15B-parameter checkpoint (very heavy on CPU):
#model, alphabet = esm.pretrained.esm2_t48_15B_UR50D()

# Alternative: load a checkpoint already saved on disk:
#checkpoint_path = "esm2_t48_15B_UR50D.pt"
#checkpoint_path = "esm2_t6_8M_UR50D.pt"
#model_data = torch.load(checkpoint_path, weights_only = False)
#model, alphabet = esm.pretrained.load_model_and_alphabet_core(checkpoint_path, model_data = model_data)

# BUG FIX: previously every loading line was commented out, so `model`,
# `alphabet` and `batch_converter` were never defined and `model.eval()`
# crashed with NameError. Default to the small 8M-parameter model so the
# script runs out of the box; swap in one of the alternatives above as needed.
model, alphabet = esm.pretrained.esm2_t6_8M_UR50D()
batch_converter = alphabet.get_batch_converter()

# Inference mode: disables dropout and other training-only behavior.
model.eval()

# Run on CPU (no GPU assumed to be available).
device = torch.device("cpu")
model = model.to(device)

try:
    # Reuse the previously parsed table if it exists (fast path).
    df = pd.read_csv(
        'parsed.tsv.gz',
        compression = 'gzip',
        sep = '\t'
    )

    print('Read data from disk.')

except FileNotFoundError:
    # First run: parse the raw SwissProt flat file and cache the result.
    # Only a missing cache triggers regeneration — the original bare
    # `except:` would also have silently swallowed real read/parse errors.
    print('Generating data...')
    records = []
    # Context manager closes the flat file when parsing ends (the original
    # `open(...)` handle was never closed).
    with open("uniprot_sprot.dat") as handle:
        for record in tqdm(SwissProt.parse(handle)):

            go_terms = [xref[1] for xref in record.cross_references if xref[0] == "GO"]

            # Proteins with no annotated GO term get a placeholder rather
            # than being dropped. (The original comment said "ignore", but
            # the code keeps them — downstream filtering uses this marker.)
            if len(go_terms) < 1:
                go_terms = ['No_annotation']

            # Free-text FUNCTION comments from the CC block. Previously this
            # list was computed and then ignored while the same generator
            # expression was re-evaluated below.
            function_comments = [
                c for c in record.comments if c.startswith("-!- FUNCTION:")
            ]

            records.append({
                "Entry": record.entry_name,
                "Accession": record.accessions[0],
                "Protein Name": record.description,
                "Gene Name": record.gene_name,
                "Organism": record.organism,
                "Sequence": record.sequence,
                "Length": len(record.sequence),
                "GO": "; ".join(go_terms),
                "Function": "\n".join(function_comments),
            })

    df = pd.DataFrame(records)

    nones = df["GO"].str.contains("No_annotation").sum()
    print('Sequences with no GO annotation: %d' % nones)

    # Cache the parsed table for subsequent runs.
    df.to_csv(
        "parsed.tsv.gz",
        compression = 'gzip',
        sep = '\t',
        index = False
    )

# Don't keep any non-annotated proteins. `notna()` alone was not enough:
# rows without GO terms carry the 'No_annotation' placeholder string assigned
# during parsing, which is a real string and therefore never NaN — they must
# be excluded explicitly as well.
df = df[df['GO'].notna() & (df['GO'] != 'No_annotation')]

kept = [
    "Homo sapiens (Human).",
    #"Escherichia coli.",
    #"Rattus norvegicus (Rat).",
    "Mus musculus (Mouse).",
    #"Severe acute respiratory syndrome coronavirus (SARS-CoV).",
    #"Severe acute respiratory syndrome coronavirus 2 (2019-nCoV) (SARS-CoV-2).",
    #"Saccharomyces cerevisiae (Baker's yeast).",
    #"Arabidopsis thaliana (Mouse-ear cress).",
    #"Mycobacterium tuberculosis."
]

# Keep sequences belonging to our species of interest
df = df[df['Organism'].isin(kept)]
df.to_csv('Kept.csv.gz', compression = 'gzip', index = False)

ids = df["Entry"].tolist()
sequences = df["Sequence"].tolist()

# Persist (id, sequence) metadata so embeddings can be matched back to their
# proteins after generation.
with open("metadata.pkl", "wb") as f:
    pickle.dump({"ids": ids, "sequences": sequences}, f)

data = list(zip(ids, sequences))

# Representation layer to extract. Layer 33 is the conventional choice for
# the large ESM-2 checkpoints; smaller models fall back to their final layer.
# (The original `num_layers > 6` test would have requested the non-existent
# layer 33 from any model with 7-32 layers.)
if model.num_layers >= 33:
    layer = 33 # Embeddings obtained from the 33rd layer
else:
    layer = model.num_layers

try:
    embeddings = []

    # ROW NUMBER: SPECIFY THIS NUMBER TO START FROM THAT POSITION
    # (if you for some reason stopped the generation and want to continue)
    history = 0

    start = history
    data = data[history:]
    print('REMAINING ENTRIES:', len(data))

    print("Generating embeddings...")

    print("Starting on row %d" % history)

    print("Using layer %d of %d" % (layer, model.num_layers))
    for i in tqdm(range(len(data))):
        # One sequence per forward pass keeps memory bounded on CPU.
        batch_labels, batch_strs, batch_tokens = batch_converter([data[i]])
        batch_tokens = batch_tokens.to(device)

        with torch.no_grad():
            results = model(batch_tokens, repr_layers=[layer])

        token_representations = results["representations"][layer]

        tokens = batch_tokens[0]

        # Count of real (non-padding) tokens; includes the BOS/EOS specials.
        sequence_length = (tokens != alphabet.padding_idx).nonzero().size(0)
        # Strip BOS (position 0) and EOS (last real position) so only residue
        # embeddings remain.
        residue_embeddings = token_representations[0, 1:sequence_length-1]

        # Mean pooling over residues -> one fixed-size vector per protein.
        emb = residue_embeddings.mean(dim=0).cpu().numpy()

        embeddings.append(emb)

        history += 1

except BaseException as e:
    # Catches both real errors and Ctrl-C (KeyboardInterrupt), matching the
    # coverage of the original bare `except:` but reporting what happened
    # instead of discarding it. A checkpoint is saved (NPY format); its file
    # name records which rows completed, so the next run can resume there.
    print('Embedding generation stopped: %r' % (e,))
    if embeddings:
        X = np.stack(embeddings)
        np.save("X_embeddings_incomplete_%d_to_%d.npy" % (start, history-1), X)
        with open('Data_incomplete_%d_to_%d.pkl' % (start, history-1), 'wb') as f:
            pickle.dump(data, f)
    else:
        # Nothing completed yet: np.stack([]) would itself raise and mask the
        # original error, and there is nothing worth checkpointing.
        print('Stopped before any embedding was completed; nothing saved.')
    # `raise SystemExit` is the portable form of the interactive `exit()`.
    raise SystemExit()

# Full run finished: persist the complete embedding matrix.
# NOTE(review): the file name still says "incomplete" even on a full run —
# presumably so downstream tooling can treat all parts uniformly; confirm.
X = np.stack(embeddings)
np.save(f"X_embeddings_incomplete_{start}_to_{history}.npy", X)

# Use np.concatenate([part1, part2, ...]) to obtain the whole thing.
# Kept.csv.gz should have the corresponding metadata to all of those embeddings.
# Check the row numbers saved in the file names to parse the metadata in case of doubt.