"""
Custom Student Model for Knowledge Distillation
"""
import torch
import torch.nn as nn
from transformers import PreTrainedModel, PretrainedConfig
from typing import List, Optional

class StudentModelConfig(PretrainedConfig):
    model_type = "distilled_student"

    def __init__(
        self,
        hidden_size=768,
        num_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        vocab_size=30522,
        max_position_embeddings=512,
        modalities: Optional[List[str]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # Avoid a shared mutable default argument; fall back to text-only.
        self.modalities = modalities if modalities is not None else ["text"]

class StudentModel(PreTrainedModel):
    config_class = StudentModelConfig

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_layers = config.num_layers
        self.modalities = config.modalities

        # Build model layers based on config: token embeddings plus learned
        # absolute position embeddings (sized by max_position_embeddings),
        # a stack of encoder layers, and a pooler projection.
        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.layers = nn.ModuleList([
            nn.TransformerEncoderLayer(
                d_model=config.hidden_size,
                nhead=config.num_attention_heads,
                dim_feedforward=config.intermediate_size,
                batch_first=True,
            ) for _ in range(config.num_layers)
        ])
        self.pooler = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, input_ids=None, attention_mask=None, **kwargs):
        if input_ids is not None:
            hidden_states = self.embeddings(input_ids)
        else:
            # Other modalities pass pre-computed embeddings directly.
            hidden_states = kwargs.get('inputs_embeds')
            if hidden_states is None:
                raise ValueError("Provide either input_ids or inputs_embeds.")

        # Add learned position information before the encoder stack.
        positions = torch.arange(hidden_states.size(1), device=hidden_states.device)
        hidden_states = hidden_states + self.position_embeddings(positions)

        # HF-style attention_mask marks real tokens with 1 and padding with 0,
        # whereas src_key_padding_mask expects True at positions to *ignore*.
        key_padding_mask = (attention_mask == 0) if attention_mask is not None else None

        for layer in self.layers:
            hidden_states = layer(hidden_states, src_key_padding_mask=key_padding_mask)

        # Mean-pool over non-padding positions only, then project.
        if attention_mask is not None:
            mask = attention_mask.unsqueeze(-1).to(hidden_states.dtype)
            mean = (hidden_states * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1e-9)
        else:
            mean = hidden_states.mean(dim=1)
        pooled = self.pooler(mean)

        return {
            'last_hidden_state': hidden_states,
            'pooler_output': pooled
        }