"""Dummy Hugging Face causal LM used for vLLM serving smoke tests."""

import torch
from torch import nn
from transformers import PreTrainedModel, PretrainedConfig
from transformers.modeling_outputs import CausalLMOutputWithPast

class DummyConfig(PretrainedConfig):
    """Minimal configuration for the dummy causal LM."""

    model_type = "dummy"
    
    def __init__(
        self,
        vocab_size=32000,
        hidden_size=32,
        intermediate_size=64,
        num_hidden_layers=1,
        num_attention_heads=1,
        max_position_embeddings=2048,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings

class DummyForCausalLM(PreTrainedModel):
    """Tiny causal LM that produces constant logits; meant for serving tests, not real inference."""

    config_class = DummyConfig
    _keys_to_ignore_on_load_missing = ["lm_head.weight"]
    
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embed = nn.Embedding(config.vocab_size, config.hidden_size)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        
        # Fixed response text (translated from the original Korean). Note that
        # forward() below never reads this attribute; it is kept for reference only.
        self.fixed_response = "This is the dummy model's fixed response, created for vLLM serving tests."
        
        # Initialize weights (standard Transformers post-init hook)
        self.post_init()
        
    def get_input_embeddings(self):
        return self.embed
        
    def set_input_embeddings(self, value):
        self.embed = value
        
    def get_output_embeddings(self):
        return self.lm_head
    
    def forward(self, input_ids=None, attention_mask=None, **kwargs):
        batch_size = input_ids.shape[0] if input_ids is not None else 1
        seq_len = input_ids.shape[1] if input_ids is not None else 1
        
        # Stand-in hidden states: all zeros (the embedding layer is not used here)
        dummy_hidden = torch.zeros((batch_size, seq_len, self.config.hidden_size), 
                                    dtype=torch.float32, 
                                    device=input_ids.device if input_ids is not None else "cpu")
        
        # Project the zero hidden states to vocabulary-sized logits
        logits = self.lm_head(dummy_hidden)
        
        # With zero hidden states and a bias-free head, the logits are all zeros, so
        # greedy decoding always picks the same token. Return a standard output
        # object so both attribute and dict-style access work with generate().
        return CausalLMOutputWithPast(logits=logits)
    
    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
        # No KV cache is maintained, so always feed the full input_ids back in.
        return {
            "input_ids": input_ids,
            "past_key_values": past_key_values
        }
    
    def _reorder_cache(self, past_key_values, beam_idx):
        # No real cache is kept, so there is nothing to reorder for beam search.
        return past_key_values
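

# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal smoke test, assuming this file is importable as-is: build the config
# and model, run a forward pass on arbitrary token ids, and save the weights with
# save_pretrained(). The output directory "./dummy-model" is a hypothetical example.
if __name__ == "__main__":
    config = DummyConfig()
    model = DummyForCausalLM(config)

    # A tiny batch of random token ids within the vocabulary.
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    outputs = model(input_ids=input_ids)
    print(outputs.logits.shape)  # expected: torch.Size([1, 8, 32000])

    # Save config + weights so a serving stack (e.g. vLLM) can load the directory.
    model.save_pretrained("./dummy-model")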