# modeling_kbert_mtl.py
import torch
import torch.nn as nn
from transformers import PreTrainedModel, BertConfig, BertModel

def _bert_config_from_base_dict(base_cfg_dict: dict) -> BertConfig:
    """Rebuild a BertConfig from the `base_model_config` dict stored on the
    top-level config, keeping only keys that BertConfig actually accepts."""
    if base_cfg_dict is None:
        raise ValueError("config.base_model_config is required for offline load.")

    base_cfg_dict = dict(base_cfg_dict)  # shallow copy; leave the caller's dict intact
    base_cfg_dict["model_type"] = "bert"  # force plain-BERT model_type

    # Filter out any keys BertConfig does not know about.
    allowed = set(BertConfig().to_dict().keys())
    kwargs = {k: v for k, v in base_cfg_dict.items() if k in allowed}

    return BertConfig(**kwargs)

class KbertMTL(PreTrainedModel):
    """BERT encoder with four heads on the [CLS] hidden state: sentiment (5-way),
    act (6-way), and emotion (7-way) classification, plus a 3-dim regression output."""

    config_class = BertConfig

    def __init__(self, config):
        super().__init__(config)

        # The encoder config travels as a plain dict on the top-level config,
        # so the model can be rebuilt offline without the base checkpoint.
        base_cfg_dict = getattr(config, "base_model_config", None)
        bert_cfg = _bert_config_from_base_dict(base_cfg_dict)
        self.bert = BertModel(bert_cfg)

        # One linear head per task, all reading the [CLS] hidden state.
        hidden = self.bert.config.hidden_size
        self.head_senti = nn.Linear(hidden, 5)
        self.head_act   = nn.Linear(hidden, 6)
        self.head_emo   = nn.Linear(hidden, 7)
        self.head_reg   = nn.Linear(hidden, 3)

        # Some BERT variants ship without token-type embeddings; record whether
        # this encoder has them so forward() can drop token_type_ids safely.
        self.has_token_type = getattr(self.bert.embeddings, "token_type_embeddings", None) is not None

        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, **kwargs):
        # Extra kwargs are accepted and ignored, so callers can pass full feature dicts.
        kw = dict(input_ids=input_ids, attention_mask=attention_mask)
        if self.has_token_type and token_type_ids is not None:
            kw["token_type_ids"] = token_type_ids
        out = self.bert(**kw)
        h = out.last_hidden_state[:, 0]  # [CLS] hidden state
        return {
            "logits_senti": self.head_senti(h),
            "logits_act":   self.head_act(h),
            "logits_emo":   self.head_emo(h),
            "pred_reg":     self.head_reg(h),
            "last_hidden_state": out.last_hidden_state
        }
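
# --- Usage sketch -------------------------------------------------------------
# A minimal smoke test, not part of the original module. It builds a tiny
# hand-made config (the `base_model_config` dict mirrors what from_pretrained
# would read from config.json) and runs a dummy batch through the model; the
# sizes below are illustrative, not the real checkpoint's.
if __name__ == "__main__":
    cfg = BertConfig()
    cfg.base_model_config = BertConfig(
        vocab_size=1000,
        hidden_size=64,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=128,
    ).to_dict()

    model = KbertMTL(cfg)
    model.eval()

    input_ids = torch.randint(0, 1000, (2, 16))  # batch of 2, seq len 16
    attention_mask = torch.ones_like(input_ids)

    with torch.no_grad():
        out = model(input_ids=input_ids, attention_mask=attention_mask)

    for name, tensor in out.items():
        print(name, tuple(tensor.shape))
    # Expected: logits_senti (2, 5), logits_act (2, 6), logits_emo (2, 7),
    # pred_reg (2, 3), last_hidden_state (2, 16, 64)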