thewall committed on
Commit
deeb753
·
1 Parent(s): 1f225f3

Update deepbind.py

Browse files
Files changed (1) hide show
  1. deepbind.py +49 -45
deepbind.py CHANGED
@@ -2,26 +2,21 @@
2
  /*
3
  Copyright (c) 2023, thewall.
4
  All rights reserved.
5
-
6
  BSD 3-clause license:
7
  Redistribution and use in source and binary forms,
8
  with or without modification, are permitted provided
9
  that the following conditions are met:
10
-
11
  1. Redistributions of source code must retain the
12
  above copyright notice, this list of conditions
13
  and the following disclaimer.
14
-
15
  2. Redistributions in binary form must reproduce
16
  the above copyright notice, this list of conditions
17
  and the following disclaimer in the documentation
18
  and/or other materials provided with the distribution.
19
-
20
  3. Neither the name of the copyright holder nor the
21
  names of its contributors may be used to endorse or
22
  promote products derived from this software without
23
  specific prior written permission.
24
-
25
  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -35,7 +30,7 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36
  */
37
  """
38
-
39
  import datasets
40
  import torch
41
  from torch import nn
@@ -45,14 +40,18 @@ import pandas as pd
45
  from typing import List
46
  from functools import partial
47
 
 
 
48
 
49
  class DeepBind(nn.Module):
50
  ALPHABET = "ATGCN"
51
- ALPHABET_MAP = {key:i for i, key in enumerate(ALPHABET)}
52
  ALPHABET_MAP["U"] = 1
53
  ALPHABET_COMPLEMENT = "TACGN"
54
  COMPLEMENT_ID_MAP = np.array([1, 0, 3, 2, 4])
55
- def __init__(self, reverse_complement=True, num_detectors=16, detector_len=24, has_avg_pooling=True, num_hidden=1, tokenizer=None):
 
 
56
  super(DeepBind, self).__init__()
57
  self.reverse_complement = reverse_complement
58
  self.num_detectors = num_detectors
@@ -64,8 +63,8 @@ class DeepBind(nn.Module):
64
  if has_avg_pooling:
65
  self.avg_pool = nn.AvgPool1d(detector_len)
66
  self.max_pool = nn.MaxPool1d(detector_len)
67
- fcs = [nn.Linear(num_detectors*2 if self.has_avg_pooling else num_detectors, num_hidden)]
68
- if num_hidden>1:
69
  fcs.append(nn.ReLU())
70
  fcs.append(nn.Linear(num_hidden, 1))
71
  self.fc = nn.Sequential(*fcs)
@@ -90,7 +89,7 @@ class DeepBind(nn.Module):
90
 
91
  def build_embedding(self):
92
  """ATGC->ACGT:0321"""
93
- embedding = torch.zeros(5,4)
94
  embedding[0, 0] = 1
95
  embedding[1, 3] = 1
96
  embedding[2, 2] = 1
@@ -105,7 +104,7 @@ class DeepBind(nn.Module):
105
 
106
  def _load_detector(self, fobj):
107
  # dtype = functools.partial(lambda x:torch.Tensor(eval(x))
108
- dtype = lambda x:torch.Tensor(eval(x))
109
  weight1 = self._load_param(fobj, "detectors", dtype).reshape(self.detector_len, 4, self.num_detectors)
110
  biases1 = self._load_param(fobj, "thresholds", dtype)
111
  # Tx4xC->Cx4xT
@@ -114,20 +113,20 @@ class DeepBind(nn.Module):
114
 
115
  def _load_fc1(self, fobj):
116
  num_hidden1 = self.num_detectors * 2 if self.has_avg_pooling else self.num_detectors
117
- dtype = lambda x:torch.Tensor(np.array(eval(x)))
118
  weight1 = self._load_param(fobj, "weights1", dtype).reshape(num_hidden1, self.num_hidden)
119
  biases1 = self._load_param(fobj, "biases1", dtype)
120
  self.fc[0].weight.data = weight1.T.contiguous().to(device=self.fc[0].weight.device)
121
  self.fc[0].bias.data = biases1.to(device=self.fc[0].bias.device)
122
 
123
  def _load_fc2(self, fobj):
124
- dtype = lambda x:torch.Tensor(np.array(eval(x)))
125
  weight2 = self._load_param(fobj, "weights2", dtype)
126
  biases2 = self._load_param(fobj, "biases2", dtype)
127
- assert not (weight2 is None and self.num_hidden>1)
128
- assert not (biases2 is None and self.num_hidden>1)
129
- if self.num_hidden>1:
130
- self.fc[2].weight.data = weight2.reshape(1,-1).to(device=self.fc[2].weight.device)
131
  self.fc[2].bias.data = biases2.to(device=self.fc[2].bias.device)
132
 
133
  @classmethod
@@ -135,27 +134,27 @@ class DeepBind(nn.Module):
135
  line = fobj.readline().strip()
136
  tmp = line.split("=")
137
  assert tmp[0].strip() == param_name
138
- if len(tmp)>1 and len(tmp[1].strip())>0:
139
  return dtype(tmp[1].strip())
140
 
141
  @classmethod
142
  def load_model(cls, sra_id="ERR173157", file=None, ID=None):
143
  if file is None:
144
- config = datasets.load_dataset(path="thewall/deepbindweight", split="all")
145
  if ID is None:
146
- data = pd.read_excel(config[0]['selex'], index_col=0)
147
  ID = data.loc[sra_id]["ID"]
148
- file = datasets.load_dataset(path="thewall/deepbindweight", name=ID, split="all")[0]['config']
149
- keys = [("reverse_complement", lambda x:bool(eval(x))), ("num_detectors", int), ("detector_len", int),
150
- ("has_avg_pooling", lambda x:bool(eval(x))), ("num_hidden", int)]
 
151
  hparams = {}
152
  with open(file) as fobj:
153
  version = fobj.readline()[1:].strip()
154
  for key in keys:
155
  value = cls._load_param(fobj, key[0], key[1])
156
- hparams[key[0]]=value
157
- if hparams['num_hidden']==0:
158
- hparams['num_hidden']=1
159
  model = cls(**hparams)
160
  model._load_detector(fobj)
161
  model._load_fc1(fobj)
@@ -175,9 +174,9 @@ class DeepBind(nn.Module):
175
  return ans
176
 
177
  @torch.no_grad()
178
- def batch_inference(self, sequence: List[str], window_size=0, average_flag=False):
179
- if isinstance(sequence, str):
180
- sequence = [sequence]
181
  self.tokenizer.enable_padding()
182
  encodings = self.tokenizer.encode_batch(sequences)
183
  ids = torch.Tensor([encoding.ids for encoding in encodings]).to(device=self.device)
@@ -195,9 +194,9 @@ class DeepBind(nn.Module):
195
  if window_size < 1:
196
  window_size = int(self.detector_len * 1.5)
197
  scores = torch.zeros_like(seq_len).float()
198
- masked = seq_len<=window_size
199
  for idx in torch.where(masked)[0]:
200
- scores[idx] = self.forward(ids[idx:idx+1, :seq_len[idx]].int())
201
 
202
  fold_ids = F.unfold(ids[~masked].unsqueeze(1).unsqueeze(1), kernel_size=(1, window_size), stride=1)
203
  B, W, G = fold_ids.shape
@@ -205,11 +204,12 @@ class DeepBind(nn.Module):
205
  ans = self.forward(fold_ids.int())
206
  ans = ans.reshape(B, G)
207
  if average_flag:
208
- valid_len = seq_len-window_size+1
209
  for idx, value in zip(torch.where(~masked)[0], ans):
210
  scores[idx] = value[:valid_len[idx]].mean()
211
  else:
212
- unvalid_mask = torch.arange(G).unsqueeze(0).to(seq_len.device)>=(seq_len[~masked]-window_size+1).unsqueeze(1)
 
213
  ans[unvalid_mask] = -torch.inf
214
  scores[~masked] = ans.max(dim=1)[0]
215
  return scores
@@ -226,14 +226,14 @@ class DeepBind(nn.Module):
226
 
227
  def scan_model(self, seq: torch.IntTensor, window_size: int = 0, average_flag: bool = False):
228
  seq_len = seq.shape[1]
229
- if window_size<1:
230
- window_size = int(self.detector_len*1.5)
231
- if seq_len<=window_size:
232
  return self.forward(seq)
233
  else:
234
  scores = []
235
- for i in range(0, seq_len-window_size+1):
236
- scores.append(self.forward(seq[:,i:i+window_size]))
237
  scores = torch.stack(scores, dim=-1)
238
  if average_flag:
239
  return scores.mean(dim=-1)
@@ -241,7 +241,7 @@ class DeepBind(nn.Module):
241
  return scores.max(dim=-1)[0]
242
 
243
  def forward(self, seq: torch.IntTensor):
244
- seq = F.pad(seq, (self.detector_len-1, self.detector_len-1), value=4)
245
  x = self.embedding(seq)
246
  x = x.permute(0, 2, 1)
247
  x = self.detectors(x)
@@ -257,16 +257,16 @@ class DeepBind(nn.Module):
257
  return x
258
 
259
 
260
- if __name__=="__main__":
261
  """
262
  AGGUAAUAAUUUGCAUGAAAUAACUUGGAGAGGAUAGC
263
  AGACAGAGCUUCCAUCAGCGCUAGCAGCAGAGACCAUU
264
  GAGGTTACGCGGCAAGATAA
265
  TACCACTAGGGGGCGCCACC
266
-
267
  To generate 16 predictions (4 models, 4 sequences), run
268
  the deepbind executable as follows:
269
-
270
  % deepbind example.ids < example.seq
271
  D00210.001 D00120.001 D00410.003 D00328.003
272
  7.451420 -0.166146 -0.408751 -0.026180
@@ -284,13 +284,17 @@ if __name__=="__main__":
284
  import random
285
  import time
286
  from tqdm import tqdm
 
287
  sequences = ["".join([random.choice("ATGC") for _ in range(40)]) for i in range(1000)]
 
 
288
  def test_fn(sequences, fn):
289
  start_time = time.time()
290
  for start in tqdm(range(0, len(sequences), 256)):
291
- batch = sequences[start: min(start+256, len(sequences))]
292
  fn(batch)
293
- print(time.time()-start_time)
 
294
 
295
  # test_fn(sequences, model.inference)
296
  # test_fn(sequences, model.batch_inference)
 
2
  /*
3
  Copyright (c) 2023, thewall.
4
  All rights reserved.
 
5
  BSD 3-clause license:
6
  Redistribution and use in source and binary forms,
7
  with or without modification, are permitted provided
8
  that the following conditions are met:
 
9
  1. Redistributions of source code must retain the
10
  above copyright notice, this list of conditions
11
  and the following disclaimer.
 
12
  2. Redistributions in binary form must reproduce
13
  the above copyright notice, this list of conditions
14
  and the following disclaimer in the documentation
15
  and/or other materials provided with the distribution.
 
16
  3. Neither the name of the copyright holder nor the
17
  names of its contributors may be used to endorse or
18
  promote products derived from this software without
19
  specific prior written permission.
 
20
  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 
30
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
  */
32
  """
33
+ import os
34
  import datasets
35
  import torch
36
  from torch import nn
 
40
  from typing import List
41
  from functools import partial
42
 
43
+ MODEL_CONFIG = datasets.load_dataset(path="thewall/deepbindweight", split="all")
44
+ SELEX_CONFIG = pd.read_excel(MODEL_CONFIG[0]['selex'], index_col=0)
45
 
46
  class DeepBind(nn.Module):
47
  ALPHABET = "ATGCN"
48
+ ALPHABET_MAP = {key: i for i, key in enumerate(ALPHABET)}
49
  ALPHABET_MAP["U"] = 1
50
  ALPHABET_COMPLEMENT = "TACGN"
51
  COMPLEMENT_ID_MAP = np.array([1, 0, 3, 2, 4])
52
+
53
+ def __init__(self, reverse_complement=True, num_detectors=16, detector_len=24, has_avg_pooling=True, num_hidden=1,
54
+ tokenizer=None):
55
  super(DeepBind, self).__init__()
56
  self.reverse_complement = reverse_complement
57
  self.num_detectors = num_detectors
 
63
  if has_avg_pooling:
64
  self.avg_pool = nn.AvgPool1d(detector_len)
65
  self.max_pool = nn.MaxPool1d(detector_len)
66
+ fcs = [nn.Linear(num_detectors * 2 if self.has_avg_pooling else num_detectors, num_hidden)]
67
+ if num_hidden > 1:
68
  fcs.append(nn.ReLU())
69
  fcs.append(nn.Linear(num_hidden, 1))
70
  self.fc = nn.Sequential(*fcs)
 
89
 
90
  def build_embedding(self):
91
  """ATGC->ACGT:0321"""
92
+ embedding = torch.zeros(5, 4)
93
  embedding[0, 0] = 1
94
  embedding[1, 3] = 1
95
  embedding[2, 2] = 1
 
104
 
105
  def _load_detector(self, fobj):
106
  # dtype = functools.partial(lambda x:torch.Tensor(eval(x))
107
+ dtype = lambda x: torch.Tensor(eval(x))
108
  weight1 = self._load_param(fobj, "detectors", dtype).reshape(self.detector_len, 4, self.num_detectors)
109
  biases1 = self._load_param(fobj, "thresholds", dtype)
110
  # Tx4xC->Cx4xT
 
113
 
114
  def _load_fc1(self, fobj):
115
  num_hidden1 = self.num_detectors * 2 if self.has_avg_pooling else self.num_detectors
116
+ dtype = lambda x: torch.Tensor(np.array(eval(x)))
117
  weight1 = self._load_param(fobj, "weights1", dtype).reshape(num_hidden1, self.num_hidden)
118
  biases1 = self._load_param(fobj, "biases1", dtype)
119
  self.fc[0].weight.data = weight1.T.contiguous().to(device=self.fc[0].weight.device)
120
  self.fc[0].bias.data = biases1.to(device=self.fc[0].bias.device)
121
 
122
  def _load_fc2(self, fobj):
123
+ dtype = lambda x: torch.Tensor(np.array(eval(x)))
124
  weight2 = self._load_param(fobj, "weights2", dtype)
125
  biases2 = self._load_param(fobj, "biases2", dtype)
126
+ assert not (weight2 is None and self.num_hidden > 1)
127
+ assert not (biases2 is None and self.num_hidden > 1)
128
+ if self.num_hidden > 1:
129
+ self.fc[2].weight.data = weight2.reshape(1, -1).to(device=self.fc[2].weight.device)
130
  self.fc[2].bias.data = biases2.to(device=self.fc[2].bias.device)
131
 
132
  @classmethod
 
134
  line = fobj.readline().strip()
135
  tmp = line.split("=")
136
  assert tmp[0].strip() == param_name
137
+ if len(tmp) > 1 and len(tmp[1].strip()) > 0:
138
  return dtype(tmp[1].strip())
139
 
140
  @classmethod
141
  def load_model(cls, sra_id="ERR173157", file=None, ID=None):
142
  if file is None:
 
143
  if ID is None:
144
+ data = SELEX_CONFIG
145
  ID = data.loc[sra_id]["ID"]
146
+ file = os.path.join(MODEL_CONFIG['config'][0], "params", f"{ID}.txt")
147
+ keys = [("reverse_complement", lambda x: bool(eval(x))), ("num_detectors", int), ("detector_len", int),
148
+ ("has_avg_pooling", lambda x: bool(eval(x))), ("num_hidden", int)]
149
+
150
  hparams = {}
151
  with open(file) as fobj:
152
  version = fobj.readline()[1:].strip()
153
  for key in keys:
154
  value = cls._load_param(fobj, key[0], key[1])
155
+ hparams[key[0]] = value
156
+ if hparams['num_hidden'] == 0:
157
+ hparams['num_hidden'] = 1
158
  model = cls(**hparams)
159
  model._load_detector(fobj)
160
  model._load_fc1(fobj)
 
174
  return ans
175
 
176
  @torch.no_grad()
177
+ def batch_inference(self, sequences: List[str], window_size=0, average_flag=False):
178
+ if isinstance(sequences, str):
179
+ sequences = [sequences]
180
  self.tokenizer.enable_padding()
181
  encodings = self.tokenizer.encode_batch(sequences)
182
  ids = torch.Tensor([encoding.ids for encoding in encodings]).to(device=self.device)
 
194
  if window_size < 1:
195
  window_size = int(self.detector_len * 1.5)
196
  scores = torch.zeros_like(seq_len).float()
197
+ masked = seq_len <= window_size
198
  for idx in torch.where(masked)[0]:
199
+ scores[idx] = self.forward(ids[idx:idx + 1, :seq_len[idx]].int())
200
 
201
  fold_ids = F.unfold(ids[~masked].unsqueeze(1).unsqueeze(1), kernel_size=(1, window_size), stride=1)
202
  B, W, G = fold_ids.shape
 
204
  ans = self.forward(fold_ids.int())
205
  ans = ans.reshape(B, G)
206
  if average_flag:
207
+ valid_len = seq_len - window_size + 1
208
  for idx, value in zip(torch.where(~masked)[0], ans):
209
  scores[idx] = value[:valid_len[idx]].mean()
210
  else:
211
+ unvalid_mask = torch.arange(G).unsqueeze(0).to(seq_len.device) >= (
212
+ seq_len[~masked] - window_size + 1).unsqueeze(1)
213
  ans[unvalid_mask] = -torch.inf
214
  scores[~masked] = ans.max(dim=1)[0]
215
  return scores
 
226
 
227
  def scan_model(self, seq: torch.IntTensor, window_size: int = 0, average_flag: bool = False):
228
  seq_len = seq.shape[1]
229
+ if window_size < 1:
230
+ window_size = int(self.detector_len * 1.5)
231
+ if seq_len <= window_size:
232
  return self.forward(seq)
233
  else:
234
  scores = []
235
+ for i in range(0, seq_len - window_size + 1):
236
+ scores.append(self.forward(seq[:, i:i + window_size]))
237
  scores = torch.stack(scores, dim=-1)
238
  if average_flag:
239
  return scores.mean(dim=-1)
 
241
  return scores.max(dim=-1)[0]
242
 
243
  def forward(self, seq: torch.IntTensor):
244
+ seq = F.pad(seq, (self.detector_len - 1, self.detector_len - 1), value=4)
245
  x = self.embedding(seq)
246
  x = x.permute(0, 2, 1)
247
  x = self.detectors(x)
 
257
  return x
258
 
259
 
260
+ if __name__ == "__main__":
261
  """
262
  AGGUAAUAAUUUGCAUGAAAUAACUUGGAGAGGAUAGC
263
  AGACAGAGCUUCCAUCAGCGCUAGCAGCAGAGACCAUU
264
  GAGGTTACGCGGCAAGATAA
265
  TACCACTAGGGGGCGCCACC
266
+
267
  To generate 16 predictions (4 models, 4 sequences), run
268
  the deepbind executable as follows:
269
+
270
  % deepbind example.ids < example.seq
271
  D00210.001 D00120.001 D00410.003 D00328.003
272
  7.451420 -0.166146 -0.408751 -0.026180
 
284
  import random
285
  import time
286
  from tqdm import tqdm
287
+
288
  sequences = ["".join([random.choice("ATGC") for _ in range(40)]) for i in range(1000)]
289
+
290
+
291
  def test_fn(sequences, fn):
292
  start_time = time.time()
293
  for start in tqdm(range(0, len(sequences), 256)):
294
+ batch = sequences[start: min(start + 256, len(sequences))]
295
  fn(batch)
296
+ print(time.time() - start_time)
297
+
298
 
299
  # test_fn(sequences, model.inference)
300
  # test_fn(sequences, model.batch_inference)