Darkadin committed on
Commit
f76346b
·
verified ·
1 Parent(s): c1c0b22

Upload 4 files

Browse files
Files changed (4) hide show
  1. ESM2_script.py +158 -0
  2. Final_embeddings.npy +3 -0
  3. Final_metadata.csv.gz +3 -0
  4. parsed.tsv.gz +3 -0
ESM2_script.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import pickle
import time
import traceback
from io import StringIO

import numpy as np
import pandas as pd
import requests
import torch
from tqdm import tqdm

import esm
from Bio import SeqIO, SwissProt
11
+
12
# --- Model setup -------------------------------------------------------------
# Load an ESM-2 model plus its alphabet and batch converter. The original
# commit had every load statement commented out, which made the later
# `model.eval()` / `batch_converter(...)` calls raise NameError.
#
# The small 8M-parameter checkpoint is loaded by default (CPU-friendly, and
# matches the `layer = 6` branch below). For higher-quality embeddings use
# esm.pretrained.esm2_t48_15B_UR50D() — it needs far more RAM.
#
# To load from a local checkpoint file instead of downloading:
#   model_data = torch.load("esm2_t6_8M_UR50D.pt", weights_only=False)
#   model, alphabet = esm.pretrained.load_model_and_alphabet_core(
#       "esm2_t6_8M_UR50D.pt", model_data=model_data)
model, alphabet = esm.pretrained.esm2_t6_8M_UR50D()
batch_converter = alphabet.get_batch_converter()

# Inference only: disable dropout / switch to eval-mode layers.
model.eval()

device = torch.device("cpu")
model = model.to(device)
25
+
26
# --- Load or build the parsed Swiss-Prot table -------------------------------
# Try the cached TSV first; if it is missing, parse the raw Swiss-Prot flat
# file ("uniprot_sprot.dat") and cache the result for next time.
# Only FileNotFoundError triggers regeneration — a bare `except:` would
# silently rebuild (hours of work) on any unrelated error, and would even
# swallow Ctrl-C.
try:
    df = pd.read_csv(
        'parsed.tsv.gz',
        compression='gzip',
        sep='\t'
    )

    print('Read data from disk.')

except FileNotFoundError:
    print('Generating data...')
    records = []
    # `with` guarantees the flat file is closed even if parsing fails.
    with open("uniprot_sprot.dat") as handle:
        for record in tqdm(SwissProt.parse(handle)):

            go_terms = [xref[1] for xref in record.cross_references if xref[0] == "GO"]

            # Proteins with no annotated function get a sentinel GO term so
            # they can be identified (and filtered) downstream.
            if len(go_terms) < 1:
                go_terms = ['No_annotation']

            # Free-text function annotations from the comment block.
            function_comments = [
                c for c in record.comments if c.startswith("-!- FUNCTION:")
            ]

            records.append({
                "Entry": record.entry_name,
                "Accession": record.accessions[0],
                "Protein Name": record.description,
                "Gene Name": record.gene_name,
                "Organism": record.organism,
                "Sequence": record.sequence,
                "Length": len(record.sequence),
                "GO": "; ".join(go_terms),
                # Reuse the list computed above instead of re-filtering.
                "Function": "\n".join(function_comments),
            })

    df = pd.DataFrame(records)

    nones = df["GO"].str.contains("No_annotation").sum()
    print('Sequences with no GO annotation: %d' % nones)

    # Cache for subsequent runs.
    df.to_csv(
        "parsed.tsv.gz",
        compression='gzip',
        sep='\t',
        index=False
    )
73
+
74
# --- Filter and stage data for embedding -------------------------------------
# Don't keep any non-annotated proteins. NOTE: every row has a non-null GO
# string (unannotated proteins carry the 'No_annotation' sentinel assigned
# during parsing), so the original `df['GO'].notna()` filter was a no-op —
# test for the sentinel value instead.
df = df[df['GO'] != 'No_annotation']

# Organisms to embed; uncomment entries to widen the selection. Strings must
# match the Swiss-Prot "Organism" field exactly (including trailing period).
kept = [
    "Homo sapiens (Human).",
    #"Escherichia coli.",
    #"Rattus norvegicus (Rat).",
    "Mus musculus (Mouse).",
    #"Severe acute respiratory syndrome coronavirus (SARS-CoV).",
    #"Severe acute respiratory syndrome coronavirus 2 (2019-nCoV) (SARS-CoV-2).",
    #"Saccharomyces cerevisiae (Baker's yeast).",
    #"Arabidopsis thaliana (Mouse-ear cress).",
    #"Mycobacterium tuberculosis."
]

# Keep sequences belonging to our species of interest.
df = df[df['Organism'].isin(kept)]
df.to_csv('Kept.csv.gz', compression='gzip', index=False)

ids = df["Entry"].tolist()
sequences = df["Sequence"].tolist()

# Persist ids/sequences so embeddings can be matched back to proteins later.
with open("metadata.pkl", "wb") as f:
    pickle.dump({"ids": ids, "sequences": sequences}, f)

# (id, sequence) pairs in the exact format batch_converter expects.
data = list(zip(ids, sequences))
100
+
101
# --- Embedding generation ----------------------------------------------------
# Pick the representation layer: 33 for the larger checkpoints, otherwise the
# small model's final (6th) layer.
if model.num_layers > 6:
    layer = 33  # Embeddings obtained from the 33rd layer
else:
    layer = 6

try:
    embeddings = []

    # ROW NUMBER: SPECIFY THIS NUMBER TO START FROM THAT POSITION
    # (if you for some reason stopped the generation and want to continue)
    history = 0

    start = history
    data = data[history:]
    print('REMAINING ENTRIES:', len(data))

    print("Generating embeddings...")

    print("Starting on row %d" % history)

    print("Using layer %d of %d" % (layer, model.num_layers))
    for i in tqdm(range(len(data))):
        # One sequence per batch keeps memory bounded on CPU.
        batch_labels, batch_strs, batch_tokens = batch_converter([data[i]])
        batch_tokens = batch_tokens.to(device)

        with torch.no_grad():
            results = model(batch_tokens, repr_layers=[layer])

        token_representations = results["representations"][layer]

        tokens = batch_tokens[0]

        # Count real (non-padding) tokens, then slice off the first and last
        # positions (BOS/EOS) so only residue embeddings are pooled.
        sequence_length = (tokens != alphabet.padding_idx).nonzero().size(0)
        residue_embeddings = token_representations[0, 1:sequence_length-1]

        # Mean pooling over residues -> one fixed-size vector per protein.
        emb = residue_embeddings.mean(dim=0).cpu().numpy()

        embeddings.append(emb)

        # `history` counts completed rows; used to name the partial dump.
        history += 1

except BaseException:
    # This will catch the event of stopping before the end (including
    # Ctrl-C). A file will be saved (NPY format). Look at its name to specify
    # where you should be continuing from next time.
    # Print the real cause instead of hiding it — the original bare `except:`
    # made genuine bugs indistinguishable from a manual stop.
    traceback.print_exc()
    X = np.stack(embeddings)
    np.save("X_embeddings_incomplete_%d_to_%d.npy" % (start, history-1), X)
    with open('Data_incomplete_%d_to_%d.pkl' % (start, history-1), 'wb') as f:
        pickle.dump(data, f)
    exit()

X = np.stack(embeddings)
np.save("X_embeddings_incomplete_%d_to_%d.npy" % (start, history), X)

# Use np.concatenate([part1, part2, ...]) to obtain the whole thing.
# Kept.csv.gz should have the corresponding metadata to all of those embeddings.
# Check the row numbers saved in the file names to parse the metadata in case of doubt.
Final_embeddings.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:beeb378529839823e338973da7a173529050f9e224a0a1875e0a1dfb9757574f
3
+ size 739983488
Final_metadata.csv.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6a0c81a2f0992bc4e8ee598bc4c1827232eb020b2d95d87fe9ad934f0f7bac8
3
+ size 11914082
parsed.tsv.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce19f0ecdcc9b8a72eac714810a83f38f7051c29cb56b0185722e8cc2a5048ba
3
+ size 101357737