Dusit-P committed
Commit ec1e0cc · verified · 1 Parent(s): 48e0979

Update common/models.py

Files changed (1)
  1. common/models.py +125 -58
common/models.py CHANGED
@@ -1,58 +1,125 @@
- # common/models.py
- import torch.nn as nn
- import torch.nn.functional as F
- from transformers import AutoModel
-
- # Base settings matching those used during training
- BASE_MODEL_NAME = "airesearch/wangchanberta-base-att-spm-uncased"
- POOLING_AFTER_LSTM = "masked_mean"
-
- class BaseHead(nn.Module):
-     def __init__(self, hidden_in, hidden_lstm=128, num_classes=2, dropout=0.3, pooling='masked_mean'):
-         super().__init__()
-         self.lstm = nn.LSTM(hidden_in, hidden_lstm, bidirectional=True, batch_first=True)
-         self.dropout = nn.Dropout(dropout)
-         self.fc = nn.Linear(hidden_lstm*2, num_classes)
-         assert pooling in ['cls','masked_mean','masked_max']
-         self.pooling = pooling
-     def pool(self, x, mask):
-         if self.pooling=='cls': return x[:,0,:]
-         mask = mask.unsqueeze(-1)
-         if self.pooling=='masked_mean':
-             s=(x*mask).sum(1); d=mask.sum(1).clamp(min=1e-6); return s/d
-         x=x.masked_fill(mask==0,-1e9); return x.max(1).values
-     def forward_after_bert(self, seq, mask):
-         x, _ = self.lstm(seq)
-         x = self.pool(x, mask)
-         return self.fc(self.dropout(x))
-
- class Model1Baseline(nn.Module):
-     def __init__(self, name=BASE_MODEL_NAME, hidden=128, dropout=0.3, classes=2, pooling=POOLING_AFTER_LSTM):
-         super().__init__()
-         self.bert = AutoModel.from_pretrained(name)
-         self.head = BaseHead(self.bert.config.hidden_size, hidden, classes, dropout, pooling)
-     def forward(self, ids, mask):
-         out = self.bert(input_ids=ids, attention_mask=mask)
-         return self.head.forward_after_bert(out.last_hidden_state, mask)
-
- class Model2CNNBiLSTM(nn.Module):
-     def __init__(self, name=BASE_MODEL_NAME, hidden=128, dropout=0.3, classes=2, pooling=POOLING_AFTER_LSTM):
-         super().__init__()
-         self.bert = AutoModel.from_pretrained(name)
-         H = self.bert.config.hidden_size
-         self.c1 = nn.Conv1d(H,128,3,padding=1)
-         self.c2 = nn.Conv1d(128,128,5,padding=2)
-         self.head = BaseHead(128, hidden, classes, dropout, pooling)
-     def forward(self, ids, mask):
-         out = self.bert(input_ids=ids, attention_mask=mask).last_hidden_state
-         x = F.relu(self.c1(out.transpose(1,2)))
-         x = F.relu(self.c2(x)).transpose(1,2)
-         return self.head.forward_after_bert(x, mask)
-
- def create_model_by_name(model_name):
-     if model_name == "Model1_Baseline":
-         return Model1Baseline()
-     elif model_name == "Model2_CNN_BiLSTM":
-         return Model2CNNBiLSTM()
-     else:
-         raise ValueError(f"Unknown model name: {model_name}")
+
+ import json, torch, torch.nn as nn, torch.nn.functional as F
+ from pathlib import Path
+ from transformers import AutoModel, AutoTokenizer
+ from safetensors.torch import load_file as safe_load
+
+ # -------- Base pooling --------
+ class PoolingLayer(nn.Module):
+     def __init__(self, pooling="masked_mean"):
+         super().__init__()
+         assert pooling in ["cls","masked_mean","masked_max"]
+         self.pooling = pooling
+     def forward(self, x, mask):
+         if self.pooling == "cls":
+             return x[:,0,:]
+         mask = mask.unsqueeze(-1)
+         if self.pooling == "masked_mean":
+             s = (x * mask).sum(1)
+             d = mask.sum(1).clamp(min=1e-6)
+             return s / d
+         x = x.masked_fill(mask == 0, -1e9)
+         return x.max(1).values
+
+ # -------- Model defs --------
+ def _base_model(name):
+     return AutoModel.from_pretrained(name)
+
+ class Model1_WCB(nn.Module):
+     def __init__(self, name, num_labels=2, dropout=0.3):
+         super().__init__()
+         self.bert = _base_model(name)
+         H = self.bert.config.hidden_size
+         self.dropout = nn.Dropout(dropout)
+         self.fc = nn.Linear(H, num_labels)
+     def forward(self, ids, mask):
+         out = self.bert(input_ids=ids, attention_mask=mask)
+         cls = out.last_hidden_state[:,0,:]
+         return self.fc(self.dropout(cls))
+
+ class Model2_WCB_BiLSTM(nn.Module):
+     def __init__(self, name, num_labels=2, hidden=128, dropout=0.3, pooling="masked_mean"):
+         super().__init__()
+         self.bert = _base_model(name)
+         H = self.bert.config.hidden_size
+         self.lstm = nn.LSTM(H, hidden, bidirectional=True, batch_first=True)
+         self.pool = PoolingLayer(pooling)
+         self.dropout = nn.Dropout(dropout)
+         self.fc = nn.Linear(hidden*2, num_labels)
+     def forward(self, ids, mask):
+         seq = self.bert(input_ids=ids, attention_mask=mask).last_hidden_state
+         x,_ = self.lstm(seq)
+         x = self.pool(x, mask)
+         return self.fc(self.dropout(x))
+
+ class Model3_WCB_CNN_BiLSTM(nn.Module):
+     def __init__(self, name, num_labels=2, hidden=128, dropout=0.3, pooling="masked_mean"):
+         super().__init__()
+         self.bert = _base_model(name)
+         H = self.bert.config.hidden_size
+         self.c1 = nn.Conv1d(H,128,3,padding=1)
+         self.c2 = nn.Conv1d(128,128,5,padding=2)
+         self.lstm = nn.LSTM(128, hidden, bidirectional=True, batch_first=True)
+         self.pool = PoolingLayer(pooling)
+         self.dropout = nn.Dropout(dropout)
+         self.fc = nn.Linear(hidden*2, num_labels)
+     def forward(self, ids, mask):
+         out = self.bert(input_ids=ids, attention_mask=mask).last_hidden_state
+         x = F.relu(self.c1(out.transpose(1,2)))
+         x = F.relu(self.c2(x)).transpose(1,2)
+         x,_ = self.lstm(x)
+         x = self.pool(x, mask)
+         return self.fc(self.dropout(x))
+
+ class Model4_WCB_4Layer_BiLSTM(nn.Module):
+     def __init__(self, name, num_labels=2, hidden=128, dropout=0.3, pooling="masked_mean"):
+         super().__init__()
+         self.bert = _base_model(name)
+         H = self.bert.config.hidden_size
+         self.w = nn.Parameter(torch.ones(4))
+         self.lstm = nn.LSTM(H, hidden, bidirectional=True, batch_first=True)
+         self.pool = PoolingLayer(pooling)
+         self.dropout = nn.Dropout(dropout)
+         self.fc = nn.Linear(hidden*2, num_labels)
+     def _pool_layers(self, hs):
+         # Softmax-weighted sum of the last four encoder hidden-state layers
+         last4 = hs[-4:]
+         w = F.softmax(self.w, 0)
+         return sum(w[i]*last4[i] for i in range(4))
+     def forward(self, ids, mask):
+         out = self.bert(input_ids=ids, attention_mask=mask, output_hidden_states=True)
+         seq = self._pool_layers(out.hidden_states)
+         x,_ = self.lstm(seq)
+         x = self.pool(x, mask)
+         return self.fc(self.dropout(x))
+
+ # -------- Factory & Loader --------
+ def _build(arch, base_model, num_labels, pooling):
+     if arch == "WCB":
+         return Model1_WCB(base_model, num_labels)
+     if arch == "WCB_BiLSTM":
+         return Model2_WCB_BiLSTM(base_model, num_labels, pooling=pooling)
+     if arch == "WCB_CNN_BiLSTM":
+         return Model3_WCB_CNN_BiLSTM(base_model, num_labels, pooling=pooling)
+     if arch == "WCB_4Layer_BiLSTM":
+         return Model4_WCB_4Layer_BiLSTM(base_model, num_labels, pooling=pooling)
+     raise ValueError(f"Unknown architecture: {arch}")
+
+ def load_model(model_dir: str):
+     """
+     Load a model from a model directory (containing config.json + model.safetensors).
+     Return: tokenizer, model (eval mode), config (dict)
+     """
+     d = Path(model_dir)
+     cfg = json.loads((d/"config.json").read_text(encoding="utf-8"))
+     arch = cfg.get("architecture","WCB")
+     base = cfg.get("base_model","airesearch/wangchanberta-base-att-spm-uncased")
+     nlabel = int(cfg.get("num_labels",2))
+     pooling = cfg.get("pooling_after_lstm","masked_mean")
+
+     model = _build(arch, base, nlabel, pooling)
+     sd = safe_load(str(d/"model.safetensors"))
+     model.load_state_dict(sd, strict=False)
+     model.eval()
+
+     tok = AutoTokenizer.from_pretrained(base, use_fast=True)
+     return tok, model, cfg
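
For context, a minimal usage sketch of the new load_model helper. The checkpoint directory ./models/example_run and the input text are placeholders, not part of this commit; the call pattern follows the forward(ids, mask) signature defined above.

    import torch
    from common.models import load_model

    # Hypothetical directory containing config.json + model.safetensors
    tok, model, cfg = load_model("./models/example_run")

    enc = tok("example text", return_tensors="pt", truncation=True, padding=True, max_length=256)
    with torch.no_grad():
        logits = model(enc["input_ids"], enc["attention_mask"])  # forward(ids, mask)
    print(logits.argmax(-1).item(), cfg.get("num_labels", 2))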