abd-ur committed on
Commit 8aaecbf · verified · 1 Parent(s): c278a8f

Create model.py

Under Development

Files changed (1): model.py (+800, -0)
model.py ADDED
"""
This module provides transformer-based models for processing hierarchical VCF data.
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import logging
from typing import Dict, List, Tuple, Optional, Union, Any
from dataclasses import dataclass

from transformers import PreTrainedModel, PretrainedConfig
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers.utils import ModelOutput

from config import ModelConfig, ConfigManager
from tokenizer import HierarchicalVCFTokenizer


# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@dataclass
class HierarchicalVCFOutput(ModelOutput):
    """
    Output container for HierarchicalVCFModel.

    Args:
        loss: Classification loss (if labels provided)
        logits: Classification logits
        hidden_states: Last hidden states
        attentions: Attention weights from all layers
        hierarchical_embeddings: Embeddings at each hierarchical level
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[torch.FloatTensor] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    hierarchical_embeddings: Optional[Dict[str, torch.FloatTensor]] = None


class HierarchicalVCFConfig(PretrainedConfig):
    """Configuration class for HierarchicalVCFModel."""

    model_type = "hierarchical-vcf"

    def __init__(self,
                 vocab_sizes: Optional[Dict[str, int]] = None,
                 embed_dim: int = 64,
                 transformer_dim: int = 256,
                 nhead: int = 8,
                 num_layers: int = 3,
                 num_classes: int = 2,
                 hidden_dims: Optional[List[int]] = None,
                 dropout: float = 0.1,
                 activation: str = "gelu",
                 layer_norm_eps: float = 1e-12,
                 max_position_embeddings: int = 1024,
                 use_hierarchical_attention: bool = True,
                 use_positional_encoding: bool = True,
                 attention_probs_dropout_prob: float = 0.1,
                 hidden_dropout_prob: float = 0.1,
                 classifier_dropout: Optional[float] = None,
                 **kwargs):

        super().__init__(**kwargs)

        self.vocab_sizes = vocab_sizes or {
            'impact': 10, 'ref': 10, 'alt': 10,
            'chromosome': 30, 'pathway': 100, 'gene': 1000
        }
        self.embed_dim = embed_dim
        self.transformer_dim = transformer_dim
        self.nhead = nhead
        self.num_layers = num_layers
        self.num_classes = num_classes
        self.hidden_dims = hidden_dims or [512, 256]
        self.dropout = dropout
        self.activation = activation
        self.layer_norm_eps = layer_norm_eps
        self.max_position_embeddings = max_position_embeddings
        self.use_hierarchical_attention = use_hierarchical_attention
        self.use_positional_encoding = use_positional_encoding
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.hidden_dropout_prob = hidden_dropout_prob
        self.classifier_dropout = classifier_dropout

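
# A minimal sketch of constructing the config by hand; the vocabulary sizes
# below are illustrative placeholders, not values taken from a real tokenizer.
def _example_build_config() -> HierarchicalVCFConfig:
    """Illustrative only; not called anywhere in this module."""
    return HierarchicalVCFConfig(
        vocab_sizes={'impact': 10, 'ref': 10, 'alt': 10,
                     'chromosome': 30, 'pathway': 100, 'gene': 1000},
        embed_dim=64,
        nhead=8,  # must divide embed_dim
        num_layers=3,
        num_classes=2,
    )
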

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017)."""

    def __init__(self, d_model: int, max_len: int = 5000, dropout: float = 0.1):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() *
                             (-math.log(10000.0) / d_model))

        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)  # [max_len, 1, d_model]

        self.register_buffer('pe', pe)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: Tensor of shape [seq_len, batch_size, d_model]
        """
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)

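
# A minimal shape sketch for PositionalEncoding: input and output are both
# [seq_len, batch_size, d_model]; the values here are arbitrary.
def _example_positional_encoding() -> torch.Tensor:
    """Illustrative only; not called anywhere in this module."""
    pos = PositionalEncoding(d_model=64, max_len=128)
    x = torch.zeros(10, 1, 64)  # 10 positions, batch of 1
    return pos(x)  # [10, 1, 64], with sinusoidal positions added
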

class MutationEmbedder(nn.Module):
    """Embeds the categorical mutation fields and projects them into a shared space."""

    def __init__(self, vocab_sizes: Dict[str, int], embed_dim: int = 64, dropout: float = 0.1):
        super().__init__()

        self.embed_dim = embed_dim
        self.mutation_fields = ['impact', 'ref', 'alt']

        # Create embedding layers for each field
        self.embed_layers = nn.ModuleDict({
            field: nn.Embedding(vocab_sizes.get(field, 100), embed_dim, padding_idx=0)
            for field in self.mutation_fields
        })

        # Projection layer to combine embeddings; note that it assumes all
        # fields in self.mutation_fields are present in the input
        self.mutation_dim = embed_dim * len(self.mutation_fields)
        self.projection = nn.Linear(self.mutation_dim, embed_dim)
        self.layer_norm = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, mutation_batch: Dict[str, torch.Tensor]) -> torch.Tensor:
        """
        Args:
            mutation_batch: Dict with token tensors for each field

        Returns:
            Embedded mutations with shape `mutation_batch[field].shape + (embed_dim,)`,
            e.g. [batch_size, seq_len, embed_dim] for 2-D token tensors
        """
        embeddings = []

        for field in self.mutation_fields:
            if field in mutation_batch:
                field_emb = self.embed_layers[field](mutation_batch[field])
                embeddings.append(field_emb)

        if not embeddings:
            raise ValueError("No valid mutation fields found in input")

        # Concatenate and project
        concat_emb = torch.cat(embeddings, dim=-1)
        projected_emb = self.projection(concat_emb)

        # Apply layer norm and dropout
        output = self.layer_norm(projected_emb)
        output = self.dropout(output)

        return output

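
# A minimal sketch of MutationEmbedder on random token ids; vocab sizes and
# shapes are illustrative placeholders.
def _example_embed_mutations() -> torch.Tensor:
    """Illustrative only; not called anywhere in this module."""
    embedder = MutationEmbedder(
        vocab_sizes={'impact': 10, 'ref': 10, 'alt': 10}, embed_dim=64
    )
    # Two sequences of five mutations each; token id 0 is reserved for padding
    batch = {field: torch.randint(1, 10, (2, 5)) for field in ('impact', 'ref', 'alt')}
    return embedder(batch)  # [2, 5, 64]
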

class HierarchicalAttention(nn.Module):
    """Self-attention followed by learned attention pooling over the sequence."""

    def __init__(self, d_model: int, nhead: int = 8, dropout: float = 0.1):
        super().__init__()

        self.d_model = d_model
        self.nhead = nhead

        # Multi-head attention
        self.multihead_attn = nn.MultiheadAttention(
            d_model, nhead, dropout=dropout, batch_first=True
        )

        # Attention pooling
        self.attention_weights = nn.Parameter(torch.randn(d_model))
        self.layer_norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            x: Input tensor [batch_size, seq_len, d_model]
            mask: Padding mask [batch_size, seq_len]; True marks positions to ignore
        Returns:
            Tuple of (pooled_output [batch_size, d_model], attention_probs [batch_size, seq_len])
        """
        # Self-attention
        attn_output, attn_weights = self.multihead_attn(x, x, x, key_padding_mask=mask)
        attn_output = self.layer_norm(attn_output + x)  # Residual connection

        # Attention pooling
        scores = torch.matmul(attn_output, self.attention_weights)  # [batch_size, seq_len]

        if mask is not None:
            scores = scores.masked_fill(mask, float('-inf'))

        attention_probs = F.softmax(scores, dim=-1)  # [batch_size, seq_len]
        pooled_output = torch.sum(attention_probs.unsqueeze(-1) * attn_output, dim=1)  # [batch_size, d_model]

        pooled_output = self.dropout(pooled_output)

        return pooled_output, attention_probs

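
# A minimal sketch of HierarchicalAttention pooling a sequence; shapes are
# illustrative. The boolean mask follows PyTorch's key_padding_mask convention
# (True = ignore).
def _example_attention_pooling() -> torch.Tensor:
    """Illustrative only; not called anywhere in this module."""
    attn = HierarchicalAttention(d_model=64, nhead=8)
    x = torch.randn(2, 7, 64)
    padding_mask = torch.zeros(2, 7, dtype=torch.bool)  # no padding in this sketch
    pooled, weights = attn(x, padding_mask)
    return pooled  # [2, 64]
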

class HierarchicalTransformerLayer(nn.Module):
    """Transformer layer that attends over a sequence and pools it to a single vector."""

    def __init__(self, d_model: int, nhead: int = 8, dim_feedforward: int = 2048,
                 dropout: float = 0.1, activation: str = "gelu"):
        super().__init__()

        self.hierarchical_attention = HierarchicalAttention(d_model, nhead, dropout)

        # Feed-forward network
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        if activation == "gelu":
            self.activation = F.gelu
        elif activation == "relu":
            self.activation = F.relu
        else:
            raise ValueError(f"Unsupported activation: {activation}")

    def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            x: Input tensor [batch_size, seq_len, d_model]
            mask: Padding mask [batch_size, seq_len]; True marks positions to ignore
        Returns:
            Tuple of (pooled output [batch_size, d_model], attention_weights)
        """
        # Hierarchical attention pools the sequence to [batch_size, d_model]
        attn_output, attn_weights = self.hierarchical_attention(x, mask)
        # Mean-pool the input for the residual (note: the mean includes any padded positions)
        x = self.norm1(x.mean(dim=1) + self.dropout1(attn_output))

        # Feed-forward
        ff_output = self.linear2(self.dropout2(self.activation(self.linear1(x))))
        x = self.norm2(x + ff_output)

        return x, attn_weights

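
# A minimal sketch showing that one HierarchicalTransformerLayer consumes a
# [batch, seq, d_model] tensor and pools the sequence dimension away; this is
# why callers re-add a length-1 sequence dimension between layers.
def _example_transformer_layer() -> torch.Tensor:
    """Illustrative only; not called anywhere in this module."""
    layer = HierarchicalTransformerLayer(d_model=64, nhead=8, dim_feedforward=256)
    x = torch.randn(2, 7, 64)
    out, attn_weights = layer(x)
    return out  # [2, 64]: the sequence dimension has been pooled
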

class HierarchicalVCFModel(PreTrainedModel):
    """
    This model processes VCF data in a hierarchical manner:
    Mutations -> Genes -> Chromosomes -> Pathways -> Sample
    """

    config_class = HierarchicalVCFConfig

    def __init__(self, config: HierarchicalVCFConfig):
        super().__init__(config)

        self.config = config
        self.num_classes = config.num_classes

        # Embedding layers
        self.mutation_embedder = MutationEmbedder(
            vocab_sizes=config.vocab_sizes,
            embed_dim=config.embed_dim,
            dropout=config.hidden_dropout_prob
        )

        # Positional encoding
        if config.use_positional_encoding:
            self.pos_encoder = PositionalEncoding(
                config.embed_dim,
                max_len=config.max_position_embeddings,
                dropout=config.hidden_dropout_prob
            )

        # Hierarchical transformer layers
        self.transformer_layers = nn.ModuleList([
            HierarchicalTransformerLayer(
                d_model=config.embed_dim,
                nhead=config.nhead,
                dim_feedforward=config.transformer_dim,
                dropout=config.attention_probs_dropout_prob,
                activation=config.activation
            )
            for _ in range(config.num_layers)
        ])

        # Hierarchical aggregation layers
        self.gene_aggregator = HierarchicalAttention(config.embed_dim, config.nhead)
        self.chromosome_aggregator = HierarchicalAttention(config.embed_dim, config.nhead)
        self.pathway_aggregator = HierarchicalAttention(config.embed_dim, config.nhead)

        # Classification head
        classifier_layers = []
        input_dim = config.embed_dim

        for hidden_dim in config.hidden_dims:
            classifier_layers.extend([
                nn.Linear(input_dim, hidden_dim),
                nn.ReLU(),
                nn.Dropout(config.classifier_dropout or config.hidden_dropout_prob)
            ])
            input_dim = hidden_dim

        classifier_layers.append(nn.Linear(input_dim, config.num_classes))

        self.classifier = nn.Sequential(*classifier_layers)

        # Initialize weights
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Initialize weights following standard transformer practice."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, nn.LayerNorm):
            torch.nn.init.zeros_(module.bias)
            torch.nn.init.ones_(module.weight)

    def forward(self,
                input_data: Dict[str, Any],
                labels: Optional[torch.Tensor] = None,
                output_attentions: bool = False,
                output_hidden_states: bool = False,
                return_dict: bool = True) -> Union[Tuple, HierarchicalVCFOutput]:
        """
        Args:
            input_data: Hierarchical input data from data collator
            labels: Labels for supervised learning
            output_attentions: Whether to output attention weights
            output_hidden_states: Whether to output hidden states
            return_dict: Whether to return ModelOutput object
        Returns:
            HierarchicalVCFOutput or tuple of outputs
        """

        batch_samples = input_data['samples']
        batch_size = len(batch_samples)

        sample_embeddings = []
        all_attentions = [] if output_attentions else None
        hierarchical_embeddings = {} if output_hidden_states else None

        for sample in batch_samples:
            result = self._process_sample(
                sample,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states
            )

            # _process_sample packs extra outputs depending on the flags;
            # unpack in the same order it packs them
            sample_attentions = sample_hierarchical = None
            if output_attentions and output_hidden_states:
                sample_embedding, sample_attentions, sample_hierarchical = result
            elif output_attentions:
                sample_embedding, sample_attentions = result
            elif output_hidden_states:
                sample_embedding, sample_hierarchical = result
            else:
                sample_embedding = result

            if output_attentions:
                all_attentions.append(sample_attentions)

            if output_hidden_states:
                for level, emb in sample_hierarchical.items():
                    hierarchical_embeddings.setdefault(level, []).append(emb)

            sample_embeddings.append(sample_embedding)

        # Stack sample embeddings
        if sample_embeddings:
            hidden_states = torch.stack(sample_embeddings)  # [batch_size, embed_dim]
        else:
            hidden_states = torch.zeros(batch_size, self.config.embed_dim, device=self.device)

        # Classification
        logits = self.classifier(hidden_states)

        # Compute loss if labels provided
        loss = None
        if labels is not None:
            if self.config.num_classes == 1:
                # Regression
                loss_fct = nn.MSELoss()
                loss = loss_fct(logits.squeeze(-1), labels.float().view(-1))
            else:
                # Classification
                loss_fct = nn.CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config.num_classes), labels.view(-1))

        if not return_dict:
            output = (logits,)
            if output_hidden_states:
                output = output + (hidden_states,)
            if output_attentions:
                output = output + (all_attentions,)
            if loss is not None:
                output = (loss,) + output
            return output

        return HierarchicalVCFOutput(
            loss=loss,
            logits=logits,
            hidden_states=hidden_states,
            attentions=all_attentions,
            hierarchical_embeddings=hierarchical_embeddings
        )

    def _process_sample(self,
                        sample: Dict[str, Any],
                        output_attentions: bool = False,
                        output_hidden_states: bool = False) -> torch.Tensor:
        """
        Process a single hierarchical sample.

        Args:
            sample: Single sample from batch
            output_attentions: Whether to return attention weights
            output_hidden_states: Whether to return hierarchical embeddings
        Returns:
            Sample embedding tensor or tuple with additional outputs
        """

        pathway_embeddings = []
        sample_attentions = {} if output_attentions else None
        sample_hierarchical = {} if output_hidden_states else None

        for pathway_token, chromosomes in sample.items():
            chromosome_embeddings = []

            for chrom_token, genes in chromosomes.items():
                gene_embeddings = []

                for gene_token, mutations in genes.items():
                    # Process mutations for this gene
                    gene_embedding = self._process_gene_mutations(
                        mutations,
                        output_attentions=output_attentions
                    )

                    if output_attentions:
                        gene_embedding, gene_attentions = gene_embedding
                        sample_attentions.setdefault('gene_level', []).append(gene_attentions)

                    gene_embeddings.append(gene_embedding)

                if gene_embeddings:
                    # Aggregate genes to chromosome level
                    gene_tensor = torch.stack(gene_embeddings).unsqueeze(0)  # [1, num_genes, embed_dim]
                    chrom_embedding, chrom_attention = self.chromosome_aggregator(gene_tensor)
                    chrom_embedding = chrom_embedding.squeeze(0)  # [embed_dim]

                    chromosome_embeddings.append(chrom_embedding)

                    if output_attentions:
                        sample_attentions.setdefault('chromosome_level', []).append(chrom_attention)

            if chromosome_embeddings:
                # Aggregate chromosomes to pathway level
                chrom_tensor = torch.stack(chromosome_embeddings).unsqueeze(0)  # [1, num_chroms, embed_dim]
                pathway_embedding, pathway_attention = self.pathway_aggregator(chrom_tensor)
                pathway_embedding = pathway_embedding.squeeze(0)  # [embed_dim]

                pathway_embeddings.append(pathway_embedding)

                if output_attentions:
                    sample_attentions.setdefault('pathway_level', []).append(pathway_attention)

        if output_hidden_states:
            sample_hierarchical['pathway_embeddings'] = pathway_embeddings

        if pathway_embeddings:
            # Aggregate pathways to the sample level; the gene-level aggregator
            # is reused here as the sample-level pooler
            pathway_tensor = torch.stack(pathway_embeddings).unsqueeze(0)  # [1, num_pathways, embed_dim]
            sample_embedding, sample_attention = self.gene_aggregator(pathway_tensor)
            sample_embedding = sample_embedding.squeeze(0)  # [embed_dim]

            if output_attentions:
                sample_attentions['sample_level'] = sample_attention
        else:
            # Handle empty sample
            sample_embedding = torch.zeros(self.config.embed_dim, device=self.device)

        # Pack extra outputs in the order forward() unpacks them
        result = sample_embedding

        if output_attentions and output_hidden_states:
            result = (result, sample_attentions, sample_hierarchical)
        elif output_attentions:
            result = (result, sample_attentions)
        elif output_hidden_states:
            result = (result, sample_hierarchical)

        return result

    def _process_gene_mutations(self,
                                mutations: Dict[str, Any],
                                output_attentions: bool = False) -> torch.Tensor:
        """
        Process mutations for a single gene.

        Args:
            mutations: Mutation data for gene
            output_attentions: Whether to return attention weights
        Returns:
            Gene embedding tensor
        """

        # Handle masked format from data collator
        mutation_tensors = {}
        attention_mask = None

        for field in ['impact', 'ref', 'alt']:
            if field in mutations:
                if isinstance(mutations[field], dict) and 'tokens' in mutations[field]:
                    # Masked format: the collator mask is assumed to use
                    # 1 for real tokens and 0 for padding
                    mutation_tensors[field] = torch.tensor(mutations[field]['tokens'], device=self.device)
                    if attention_mask is None:
                        attention_mask = torch.tensor(mutations[field]['mask'], device=self.device).bool()
                else:
                    # Direct format
                    mutation_tensors[field] = torch.tensor(mutations[field], device=self.device)

        if not mutation_tensors:
            return torch.zeros(self.config.embed_dim, device=self.device)

        # Embed mutations
        mutation_embeddings = self.mutation_embedder(mutation_tensors)  # [seq_len, embed_dim]

        # Add positional encoding if enabled
        if self.config.use_positional_encoding:
            mutation_embeddings = mutation_embeddings.unsqueeze(1)  # [seq_len, 1, embed_dim]
            mutation_embeddings = self.pos_encoder(mutation_embeddings)
            mutation_embeddings = mutation_embeddings.squeeze(1)  # [seq_len, embed_dim]

        # Apply transformer layers
        mutation_embeddings = mutation_embeddings.unsqueeze(0)  # [1, seq_len, embed_dim]

        # PyTorch's key_padding_mask marks positions to IGNORE with True, the
        # inverse of the collator's 1-for-valid convention, and expects shape
        # [batch, seq_len]
        padding_mask = (~attention_mask).unsqueeze(0) if attention_mask is not None else None

        layer_attentions = [] if output_attentions else None

        for i, layer in enumerate(self.transformer_layers):
            # Each HierarchicalTransformerLayer pools the sequence down to
            # [1, embed_dim], so only the first layer sees the padding mask;
            # after it, the "sequence" has length 1
            mutation_embeddings, layer_attention = layer(
                mutation_embeddings, padding_mask if i == 0 else None
            )
            mutation_embeddings = mutation_embeddings.unsqueeze(1)  # [1, 1, embed_dim]

            if output_attentions:
                layer_attentions.append(layer_attention)

        # The transformer layers have already pooled the mutation sequence
        # (padding was excluded via the mask in the first layer), so the gene
        # representation is the remaining single position
        gene_embedding = mutation_embeddings.squeeze(1).squeeze(0)  # [embed_dim]

        if output_attentions:
            return gene_embedding, layer_attentions

        return gene_embedding

    @property
    def device(self) -> torch.device:
        """Get model device."""
        return next(self.parameters()).device

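
# A minimal end-to-end sketch of HierarchicalVCFModel.forward on one sample in
# the collator's masked format. All tokens, masks, and level names ('PW1',
# 'CHR1', 'G1') are made-up placeholders within the default vocab sizes.
def _example_model_forward() -> torch.Tensor:
    """Illustrative only; not called anywhere in this module."""
    cfg = HierarchicalVCFConfig(embed_dim=32, nhead=4, num_layers=1, num_classes=2)
    model = HierarchicalVCFModel(cfg)
    sample = {
        'PW1': {              # pathway
            'CHR1': {         # chromosome
                'G1': {       # gene -> masked mutation fields
                    'impact': {'tokens': [3, 4, 0], 'mask': [1, 1, 0]},
                    'ref': {'tokens': [2, 5, 0], 'mask': [1, 1, 0]},
                    'alt': {'tokens': [1, 6, 0], 'mask': [1, 1, 0]},
                }
            }
        }
    }
    with torch.no_grad():
        outputs = model({'samples': [sample]})
    return outputs.logits  # [1, num_classes]
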

def create_model_from_config(config_manager: ConfigManager,
                             tokenizer: HierarchicalVCFTokenizer) -> HierarchicalVCFModel:
    """
    Build a HierarchicalVCFModel from a project configuration and tokenizer.

    Args:
        config_manager: Configuration manager
        tokenizer: Tokenizer instance
    Returns:
        Configured model
    """

    model_config = config_manager.model_config

    # Create Hugging Face config
    hf_config = HierarchicalVCFConfig(
        vocab_sizes=tokenizer.get_all_vocab_sizes(),
        embed_dim=model_config.embed_dim,
        transformer_dim=model_config.transformer_dim,
        nhead=model_config.nhead,
        num_layers=model_config.num_layers,
        num_classes=model_config.num_classes,
        hidden_dims=model_config.hidden_dims,
        dropout=model_config.dropout
    )

    # Create model
    model = HierarchicalVCFModel(hf_config)

    return model


# Model utilities
class ModelTrainer:
    """
    Training utilities for Hierarchical VCF Model.
    """

    def __init__(self,
                 model: HierarchicalVCFModel,
                 train_dataloader,
                 val_dataloader,
                 optimizer: Optional[torch.optim.Optimizer] = None,
                 scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
                 device: Optional[torch.device] = None):

        self.model = model
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # Move model to device
        self.model.to(self.device)

        # Default optimizer
        if optimizer is None:
            self.optimizer = torch.optim.AdamW(
                model.parameters(),
                lr=1e-4,
                weight_decay=0.01
            )
        else:
            self.optimizer = optimizer

        self.scheduler = scheduler

        # Training metrics
        self.train_losses = []
        self.val_losses = []
        self.val_accuracies = []

    def train_epoch(self) -> float:
        """Train for one epoch."""
        self.model.train()
        total_loss = 0.0
        num_batches = 0

        for batch in self.train_dataloader:
            self.optimizer.zero_grad()

            # Move labels to device; nested sample data is tensorized on the
            # model's device inside the forward pass
            if 'labels' in batch:
                labels = batch['labels'].to(self.device)
            else:
                labels = None

            # Forward pass
            outputs = self.model(batch, labels=labels)
            loss = outputs.loss if hasattr(outputs, 'loss') else outputs[0]

            # Backward pass
            loss.backward()
            self.optimizer.step()

            total_loss += loss.item()
            num_batches += 1

        if self.scheduler:
            self.scheduler.step()

        avg_loss = total_loss / max(num_batches, 1)
        self.train_losses.append(avg_loss)

        return avg_loss

    def validate(self) -> Tuple[float, float]:
        """Validate model."""
        self.model.eval()
        total_loss = 0.0
        correct_predictions = 0
        total_predictions = 0
        num_batches = 0

        with torch.no_grad():
            for batch in self.val_dataloader:
                # Move data to device
                if 'labels' in batch:
                    labels = batch['labels'].to(self.device)
                else:
                    continue  # Skip if no labels

                # Forward pass
                outputs = self.model(batch, labels=labels)
                loss = outputs.loss if hasattr(outputs, 'loss') else outputs[0]
                logits = outputs.logits if hasattr(outputs, 'logits') else outputs[1]

                total_loss += loss.item()

                # Calculate accuracy
                predictions = torch.argmax(logits, dim=-1)
                correct_predictions += (predictions == labels).sum().item()
                total_predictions += labels.size(0)
                num_batches += 1

        avg_loss = total_loss / max(num_batches, 1)
        accuracy = correct_predictions / max(total_predictions, 1)

        self.val_losses.append(avg_loss)
        self.val_accuracies.append(accuracy)

        return avg_loss, accuracy

    def train(self, num_epochs: int, save_path: Optional[str] = None) -> Dict[str, List[float]]:
        """
        Train model for specified number of epochs.

        Args:
            num_epochs: Number of training epochs
            save_path: Path to save best model

        Returns:
            Training history
        """

        best_val_loss = float('inf')

        logger.info(f"Starting training for {num_epochs} epochs...")

        for epoch in range(num_epochs):
            # Train
            train_loss = self.train_epoch()

            # Validate
            val_loss, val_accuracy = self.validate()

            logger.info(
                f"Epoch {epoch+1}/{num_epochs}: "
                f"Train Loss: {train_loss:.4f}, "
                f"Val Loss: {val_loss:.4f}, "
                f"Val Accuracy: {val_accuracy:.4f}"
            )

            # Save best model
            if save_path and val_loss < best_val_loss:
                best_val_loss = val_loss
                self.model.save_pretrained(save_path)
                logger.info(f"Saved best model to {save_path}")

        return {
            'train_losses': self.train_losses,
            'val_losses': self.val_losses,
            'val_accuracies': self.val_accuracies
        }

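
# A minimal sketch of driving ModelTrainer. `train_loader` and `val_loader` are
# assumed to be dataloaders yielding this project's collated batches (with a
# 'samples' key and optional 'labels'); they are hypothetical here.
def _example_train(model: HierarchicalVCFModel, train_loader, val_loader) -> Dict[str, List[float]]:
    """Illustrative only; not called anywhere in this module."""
    trainer = ModelTrainer(model, train_loader, val_loader)
    history = trainer.train(num_epochs=3, save_path="./best_model")
    logger.info("Final val accuracy: %.4f", history['val_accuracies'][-1])
    return history
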

# Example usage and testing
if __name__ == "__main__":
    from tokenizer import create_tokenizer_from_config
    from dataset import create_data_module_from_config

    # Create configuration
    config_manager = ConfigManager()
    config_manager.model_config.embed_dim = 32
    config_manager.model_config.num_classes = 2

    # Create tokenizer and model
    tokenizer = create_tokenizer_from_config(config_manager)

    # Build vocabulary with example data
    example_data = {
        'sample1': {
            'pathway1': {
                'chr1': {
                    'gene1': [
                        {'impact': 'HIGH', 'reference': 'A', 'alternate': 'T'}
                    ]
                }
            }
        }
    }
    tokenizer.build_vocabulary(example_data)

    # Create model
    model = create_model_from_config(config_manager, tokenizer)

    print(f"Model created with {sum(p.numel() for p in model.parameters())} parameters")
    print(f"Model config: {model.config}")

    # Test forward pass with dummy data
    dummy_batch = {
        'samples': [example_data['sample1']],
        'batch_size': 1
    }

    with torch.no_grad():
        outputs = model(dummy_batch)
        print(f"Output logits shape: {outputs.logits.shape}")
        print(f"Output logits: {outputs.logits}")