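# Inference script: generates Vietnamese reading-comprehension questions from a
# passage using a fine-tuned T5 model (answer-aware prompting plus sampling).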
import re
import torch
from difflib import SequenceMatcher
from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_error()  # silence non-error warnings from transformers
MODEL_DIR = "t5-viet-qg-finetuned"  # local directory holding the fine-tuned checkpoint
# Heuristic answer-span pattern. Branches, in order: curly-quoted phrases,
# straight-quoted phrases, the phrase following a marker word
# (là/gồm/do/theo ~ "is/comprises/by/according to"), four-digit years,
# and legal references ("Điều"/"Khoản" = Article/Clause + number).
PATTERN = re.compile(
    r"""
    “([^”]{3,120})”
    | "([^"]{3,120})"
    | \b(?:là|gồm|do|theo)\s+([^,.;:\n]{3,120})
    | \b\d{4}\b
    | \b(?:Điều|Khoản)\s+\d+\b
    """,
    re.VERBOSE | re.IGNORECASE,
)
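# Illustrative behaviour (assumed sample sentence, not from the original code):
# in 'Theo Điều 5, Hà Nội là thủ đô của Việt Nam.' the pattern should capture
# 'Điều 5' (after 'Theo') and 'thủ đô của Việt Nam' (after 'là'). Note that the
# year and Điều/Khoản branches carry no capture group; they are recovered via
# m.group(0) in extract_answers below.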
def norm(s: str) -> str:
    # Collapse runs of whitespace into single spaces and trim.
    return re.sub(r"\s+", " ", s).strip()
def is_dup(q: str, qs: list[str], thr: float = 0.85) -> bool:
    # Fuzzy duplicate check: True if q is at least `thr` similar to any prior question.
    ql = q.lower()
    for x in qs:
        if SequenceMatcher(None, ql, x.lower()).ratio() >= thr:
            return True
    return False
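# Illustrative (assumed) example: these two near-identical questions compare at
# a ratio of roughly 0.92, so the second would be rejected as a duplicate:
#   is_dup("Thủ đô của Việt Nam là gì?", ["Thủ đô Việt Nam là gì?"])  # -> True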
def extract_answers(ctx: str, max_n: int = 60) -> list[str]:
    # Collect candidate answer spans from the context, deduplicated case-insensitively.
    ctx = norm(ctx)
    answers, seen = [], set()
    for m in PATTERN.finditer(ctx):
        # The year and Điều/Khoản branches have no capture group, so fall back
        # to the whole match; otherwise groups() would be all-None and those
        # matches would be silently dropped.
        cands = [g for g in m.groups() if g] or [m.group(0)]
        for g in cands:
            g = norm(g)
            k = g.lower()
            if 3 <= len(g) <= 120 and k not in seen:
                seen.add(k)
                answers.append(g)
                if len(answers) >= max_n:
                    return answers
    if len(answers) < 8:
        # Too few pattern hits: fall back to fixed 60-character chunks drawn
        # from the first 500 characters of the context.
        for i in range(0, min(len(ctx), 500), 60):
            ch = norm(ctx[i : i + 60])
            k = ch.lower()
            if len(ch) >= 15 and k not in seen:
                seen.add(k)
                answers.append(ch)
                if len(answers) >= max_n:
                    break
    if not answers and ctx:
        # Last resort: use the opening of the context itself.
        answers = [ctx[:120]]
    return answers
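# Illustrative call (assumed input; exact spans depend on the heuristics above):
#   extract_answers("Theo Điều 5, Hà Nội là thủ đô của Việt Nam.")
# would yield roughly ["Điều 5", "thủ đô của Việt Nam", ...] plus a fallback
# chunk, since fewer than 8 pattern-based answers are found.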
def load_model():
    tok = T5Tokenizer.from_pretrained(MODEL_DIR)
    mdl = T5ForConditionalGeneration.from_pretrained(MODEL_DIR)
    dev = "cuda" if torch.cuda.is_available() else "cpu"
    try:
        mdl = mdl.to(dev)
    except RuntimeError:
        # e.g. CUDA out of memory: free cached GPU memory and fall back to CPU.
        dev = "cpu"
        try:
            torch.cuda.empty_cache()
        except Exception:
            pass
        mdl = mdl.to(dev)
    mdl.eval()
    return tok, mdl, dev
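# Assumed environment: `torch`, `transformers`, and `sentencepiece` (required by
# the slow T5Tokenizer) installed, with MODEL_DIR produced by a prior fine-tuning run.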
tokenizer, model, device = load_model()  # loaded once, at import time
def generate_questions(context: str, n: int = 20) -> list[str]:
    ctx = norm(context)
    answers = extract_answers(ctx, max_n=80)
    questions = []
    # Sampling configuration; temperature and top_p are nudged upward below
    # whenever a round produces no new questions.
    gen_cfg = dict(
        do_sample=True,
        top_k=80,
        top_p=0.98,
        temperature=1.05,
        max_new_tokens=72,
        no_repeat_ngram_size=3,
        repetition_penalty=1.08,
    )
    num_ret = 8 if n <= 20 else 10  # sequences sampled per generate() call

    def run_prompt(ans: str, rounds: int):
        prompt = f"answer: {ans}\ncontext: {ctx}\nquestion:"
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
        for _ in range(rounds):
            outs = model.generate(**inputs, num_return_sequences=num_ret, **gen_cfg)
            added = 0
            for o in outs:
                q = norm(tokenizer.decode(o, skip_special_tokens=True))
                if not q:
                    continue
                if not q.endswith("?"):
                    q += "?"
                if len(q) >= 6 and not is_dup(q, questions, thr=0.85):
                    questions.append(q)
                    added += 1
                    if len(questions) >= n:
                        return
            if added == 0:
                # Stuck on duplicates: sample more adventurously next round.
                gen_cfg["temperature"] = min(1.25, gen_cfg["temperature"] + 0.05)
                gen_cfg["top_p"] = min(0.995, gen_cfg["top_p"] + 0.005)

    with torch.inference_mode():
        for ans in answers:
            if len(questions) >= n:
                break
            run_prompt(ans, rounds=6)
        if len(questions) < n:
            # Still short: prompt repeatedly with the opening of the context.
            run_prompt(ctx[:120], rounds=12)
    return questions[:n]
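# Programmatic usage sketch (illustrative; the actual questions depend on the
# fine-tuned checkpoint and on random sampling):
#   qs = generate_questions("Hà Nội là thủ đô của Việt Nam.", n=3)
#   print(qs)  # e.g. ["Thủ đô của Việt Nam là gì?", ...]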
if __name__ == "__main__":
    # CLI prompts are in Vietnamese, matching the model's target language.
    ctx = input("\nNhập đoạn văn bản:\n").strip()  # "Enter the passage:"
    raw = input("\nNhập số lượng câu hỏi cần sinh: ").strip()  # "How many questions to generate:"
    try:
        n = int(raw)
    except ValueError:
        n = 20  # default when the input is empty or not a number
    n = max(1, min(n, 200))  # clamp to a sensible range
    qs = generate_questions(ctx, n)
    print("\nCác câu hỏi sinh ra:")  # "Generated questions:"
    for i, q in enumerate(qs, 1):
        print(f"{i}. {q}")