Upload inference.py with huggingface_hub
- inference.py +166 -0
inference.py
ADDED
@@ -0,0 +1,166 @@
"""Inference for the Vietnamese QA Stacking Ensemble v1. Loads from the Hugging Face Hub."""
import json
from pathlib import Path

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

try:
    from huggingface_hub import hf_hub_download
except ImportError:
    hf_hub_download = None


class MetaCNN(nn.Module):
    """1D-CNN meta-learner over the stacked start/end logits of the two base models."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv1d(4, 32, 3, padding=1),
            nn.ReLU(),
            nn.Conv1d(32, 32, 3, padding=1),
            nn.ReLU(),
        )
        self.start_fc = nn.Linear(32, 1)
        self.end_fc = nn.Linear(32, 1)

    def forward(self, x):
        # x: (batch, 4, seq_len) -- channels are [start1, end1, start2, end2]
        x = self.conv(x)
        start = self.start_fc(x.transpose(1, 2)).squeeze(-1)
        end = self.end_fc(x.transpose(1, 2)).squeeze(-1)
        return start, end
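
# The helper below is an added, illustrative sketch (not part of the original
# upload and not used by the pipeline): it documents the tensor contract that
# predict() hands to MetaCNN.
def _demo_meta_cnn_shapes():
    x = torch.randn(1, 4, 512)  # (batch, channels=[s1, e1, s2, e2], seq_len)
    start, end = MetaCNN()(x)
    assert start.shape == (1, 512) and end.shape == (1, 512)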


def _load_tok(mid, use_fast=True):
    try:
        return AutoTokenizer.from_pretrained(mid, use_fast=use_fast)
    except Exception as e:
        # Some checkpoints ship no fast tokenizer; retry with the slow
        # sentencepiece-based one before giving up.
        if "sentencepiece" in str(e).lower() and use_fast:
            return AutoTokenizer.from_pretrained(mid, use_fast=False)
        raise


def load_ensemble(repo_id: str = None, local_dir: str = None):
    """Load the ensemble from the Hugging Face Hub or a local directory."""
    if local_dir:
        path = Path(local_dir)
        config_path = path / "config.json"
        meta_path = path / "meta_cnn.pth"
    elif repo_id and hf_hub_download:
        config_path = hf_hub_download(repo_id=repo_id, filename="config.json")
        meta_path = hf_hub_download(repo_id=repo_id, filename="meta_cnn.pth")
    else:
        raise ValueError("Either repo_id or local_dir is required")

    with open(config_path, encoding="utf-8") as f:
        config = json.load(f)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    tokenizer1 = _load_tok(config["base_models"][0])
    tokenizer2 = AutoTokenizer.from_pretrained(config["base_models"][1], use_fast=False)

    model1 = AutoModelForQuestionAnswering.from_pretrained(config["base_models"][0]).to(device)
    model2 = AutoModelForQuestionAnswering.from_pretrained(config["base_models"][1]).to(device)

    meta_model = MetaCNN().to(device)
    meta_model.load_state_dict(torch.load(meta_path, map_location=device))
    meta_model.eval()
    model1.eval()
    model2.eval()

    return {
        "tokenizer1": tokenizer1,
        "tokenizer2": tokenizer2,
        "model1": model1,
        "model2": model2,
        "meta_model": meta_model,
        "device": device,
        "max_len_1": config.get("max_length", 512),
        "max_len_2": config.get("max_len_2", 256),
    }
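
# For reference, load_ensemble expects the repo to hold meta_cnn.pth alongside
# a config.json shaped roughly like the sketch below. The keys are inferred
# from the reads above; the model ids are placeholders, not the real ones:
#
#   {
#     "base_models": ["<base-model-1-id>", "<base-model-2-id>"],
#     "max_length": 512,
#     "max_len_2": 256
#   }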


def _pad_to_512(x):
    if x.size(0) < 512:
        pad = torch.zeros(512 - x.size(0), dtype=x.dtype, device=x.device)
        x = torch.cat([x, pad], dim=0)
    return x[:512]
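
# Behavior, for illustration:
#   _pad_to_512(torch.zeros(256)).shape -> torch.Size([512])  (zero-padded)
#   _pad_to_512(torch.zeros(600)).shape -> torch.Size([512])  (truncated)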


def predict(question: str, context: str, ensemble: dict, max_answer_len: int = 30):
    """Return (answer, no_answer_probability)."""
    t1, t2 = ensemble["tokenizer1"], ensemble["tokenizer2"]
    m1, m2 = ensemble["model1"], ensemble["model2"]
    meta = ensemble["meta_model"]
    dev = ensemble["device"]
    max1, max2 = ensemble["max_len_1"], ensemble["max_len_2"]

    enc1 = t1(
        question,
        context,
        return_tensors="pt",
        truncation="only_second",
        max_length=max1,
        padding="max_length",
    )
    enc2 = t2(
        question,
        context,
        return_tensors="pt",
        truncation="only_second",
        max_length=max2,
        padding="max_length",
    )

    inp1 = {k: v.to(dev) for k, v in enc1.items()}
    inp2 = {k: v.to(dev) for k, v in enc2.items()}

    # Locate the context tokens; fall back to SEP positions for slow
    # tokenizers, whose encodings do not support sequence_ids().
    try:
        seq_ids = enc1.sequence_ids(0)
    except Exception:
        sep_id = t1.convert_tokens_to_ids(t1.sep_token or "</s>")
        ids = enc1["input_ids"][0].tolist()
        sep_pos = [i for i, x in enumerate(ids) if x == sep_id]
        if len(sep_pos) < 2:
            return "", 1.0
        ctx_idx = list(range(sep_pos[0] + 1, sep_pos[1]))
    else:
        ctx_idx = [i for i, s in enumerate(seq_ids) if s == 1]

    if not ctx_idx:
        return "", 1.0

    ctx_start, ctx_end = ctx_idx[0], ctx_idx[-1]

    with torch.no_grad():
        o1, o2 = m1(**inp1), m2(**inp2)

    # Align both models' logits to the common 512-token axis the meta-model
    # was built for (pad or truncate as needed).
    s1 = _pad_to_512(o1.start_logits[0])
    e1 = _pad_to_512(o1.end_logits[0])
    s2 = _pad_to_512(o2.start_logits[0])
    e2 = _pad_to_512(o2.end_logits[0])

    combined = torch.stack([s1, e1, s2, e2], dim=0).unsqueeze(0)
    with torch.no_grad():
        fs, fe = meta(combined)

    sp = F.softmax(fs[0], dim=-1)
    ep = F.softmax(fe[0], dim=-1)

    # Best answer span within the context, capped at max_answer_len tokens.
    best = -1e9
    bs, be = ctx_start, ctx_start
    for s in range(ctx_start, ctx_end + 1):
        for e in range(s, min(s + max_answer_len, ctx_end) + 1):
            sc = torch.log(sp[s] + 1e-12) + torch.log(ep[e] + 1e-12)
            if sc > best:
                best, bs, be = sc, s, e

    # Compare against the null ([CLS]) span to decide "no answer".
    null = torch.log(sp[0] + 1e-12) + torch.log(ep[0] + 1e-12)
    no_ans = torch.sigmoid(null - best).item()
    if null > best:
        return "", no_ans

    ans = t1.decode(enc1["input_ids"][0][bs:be + 1], skip_special_tokens=True).strip()
    return ans, no_ans
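

if __name__ == "__main__":
    # Minimal usage sketch (added for illustration). The repo id is a
    # placeholder -- point it at the repository hosting config.json and
    # meta_cnn.pth, or pass local_dir instead.
    ensemble = load_ensemble(repo_id="<username>/<this-repo>")
    answer, no_ans_prob = predict(
        "Thủ đô của Việt Nam là gì?",      # "What is the capital of Vietnam?"
        "Hà Nội là thủ đô của Việt Nam.",  # "Hanoi is the capital of Vietnam."
        ensemble,
    )
    print(answer, no_ans_prob)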