import re
from difflib import SequenceMatcher

import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers.utils import logging as hf_logging

hf_logging.set_verbosity_error()

MODEL_DIR = "t5-viet-qg-finetuned"

PATTERN = re.compile(
    r"""
    “([^”]{3,120})”
    | "([^"]{3,120})"
    | \b(?:là|gồm|do|theo)\s+([^,.;:\n]{3,120})
    | (\b\d{4}\b)
    | (\b(?:Điều|Khoản)\s+\d+\b)
    """,
    re.VERBOSE | re.IGNORECASE,
)
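
# The pattern collects short answer candidates: spans inside curly or straight
# quotes, the phrase that follows "là"/"gồm"/"do"/"theo" (roughly: is /
# consists of / by / according to) up to the next punctuation mark, four-digit
# years, and legal references such as "Điều 12" or "Khoản 3".
# extract_answers() below length-filters and deduplicates these candidates.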


def norm(s: str) -> str:
    """Collapse runs of whitespace and trim the ends."""
    return re.sub(r"\s+", " ", s).strip()


def is_dup(q: str, qs: list[str], thr: float = 0.85) -> bool:
    """Return True if q is near-identical (ratio >= thr) to a question in qs."""
    ql = q.lower()
    for x in qs:
        if SequenceMatcher(None, ql, x.lower()).ratio() >= thr:
            return True
    return False


def extract_answers(ctx: str, max_n: int = 60) -> list[str]:
    ctx = norm(ctx)
    answers, seen = [], set()

    # Primary pass: regex-based answer candidates.
    for m in PATTERN.finditer(ctx):
        for g in m.groups():
            if not g:
                continue
            g = norm(g)
            k = g.lower()
            if 3 <= len(g) <= 120 and k not in seen:
                seen.add(k)
                answers.append(g)
                if len(answers) >= max_n:
                    return answers

    # Fallback: if the regex found few candidates, chunk the first 500
    # characters of the context into 60-character windows.
    if len(answers) < 8:
        for i in range(0, min(len(ctx), 500), 60):
            ch = norm(ctx[i : i + 60])
            k = ch.lower()
            if len(ch) >= 15 and k not in seen:
                seen.add(k)
                answers.append(ch)
                if len(answers) >= max_n:
                    break

    # Last resort: use the opening of the context itself.
    if not answers and ctx:
        answers = [ctx[:120]]

    return answers


def load_model():
    tok = T5Tokenizer.from_pretrained(MODEL_DIR)
    mdl = T5ForConditionalGeneration.from_pretrained(MODEL_DIR)

    dev = "cuda" if torch.cuda.is_available() else "cpu"
    try:
        mdl = mdl.to(dev)
    except RuntimeError:
        # Moving to GPU failed (e.g. out of memory): fall back to CPU.
        dev = "cpu"
        try:
            torch.cuda.empty_cache()
        except Exception:
            pass
        mdl = mdl.to(dev)

    mdl.eval()
    return tok, mdl, dev


tokenizer, model, device = load_model()
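# Note: the model and tokenizer are loaded once at import time; MODEL_DIR must
# be a local checkpoint directory or a model id resolvable from the Hugging
# Face Hub, otherwise from_pretrained raises here.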


def generate_questions(context: str, n: int = 20) -> list[str]:
    ctx = norm(context)
    answers = extract_answers(ctx, max_n=80)
    questions = []

    gen_cfg = dict(
        do_sample=True,
        top_k=80,
        top_p=0.98,
        temperature=1.05,
        max_new_tokens=72,
        no_repeat_ngram_size=3,
        repetition_penalty=1.08,
    )
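    # Sampling (rather than beam search) keeps the questions varied across
    # rounds; no_repeat_ngram_size and repetition_penalty curb degenerate
    # repetition, and temperature/top_p are nudged upward in run_prompt()
    # whenever a round adds nothing new.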

    num_ret = 8 if n <= 20 else 10

    def run_prompt(ans: str, rounds: int):
        prompt = f"answer: {ans}\ncontext: {ctx}\nquestion:"
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)

        for _ in range(rounds):
            outs = model.generate(**inputs, num_return_sequences=num_ret, **gen_cfg)
            added = 0
            for o in outs:
                q = norm(tokenizer.decode(o, skip_special_tokens=True))
                if not q:
                    continue
                if not q.endswith("?"):
                    q += "?"
                if len(q) >= 6 and not is_dup(q, questions, thr=0.85):
                    questions.append(q)
                    added += 1
                if len(questions) >= n:
                    return
            if added == 0:
                # Nothing new this round: sample a bit more aggressively.
                gen_cfg["temperature"] = min(1.25, gen_cfg["temperature"] + 0.05)
                gen_cfg["top_p"] = min(0.995, gen_cfg["top_p"] + 0.005)

    with torch.inference_mode():
        for ans in answers:
            if len(questions) >= n:
                break
            run_prompt(ans, rounds=6)

        # Still short of n questions: prompt once more with the opening of the
        # context as the answer and allow extra sampling rounds.
        if len(questions) < n:
            run_prompt(ctx[:120], rounds=12)

    return questions[:n]
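
# Example programmatic use (illustrative passage, made up for demonstration):
#   qs = generate_questions("Việt Nam gia nhập ASEAN vào năm 1995.", n=5)
#   -> up to 5 distinct Vietnamese questions, each ending with "?"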


if __name__ == "__main__":
    # Simple CLI: read a passage and the desired number of questions.
    ctx = input("\nNhập đoạn văn bản:\n").strip()
    try:
        n = int(input("\nNhập số lượng câu hỏi cần sinh: ").strip() or "20")
    except ValueError:
        n = 20

    n = max(1, min(n, 200))
    qs = generate_questions(ctx, n)

    print("\nCác câu hỏi sinh ra:")
    for i, q in enumerate(qs, 1):
        print(f"{i}. {q}")