import json
from difflib import SequenceMatcher

from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers.utils import logging as hf_logging

hf_logging.set_verbosity_error()

# Paths to the fine-tuned Vietnamese question-generation model and the
# SQuAD-style data file used to look up a matching answer.
MODEL_DIR = "t5-viet-qg-finetuned"
DATA_PATH = "30ktrain.json"

tokenizer = T5Tokenizer.from_pretrained(MODEL_DIR)
model = T5ForConditionalGeneration.from_pretrained(MODEL_DIR)
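
# Optional GPU placement (a sketch, not part of the original script):
# transformers models load on CPU by default. With a CUDA device available,
# both the model and, later, the tokenized inputs would need moving, e.g.:
#
#   import torch
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   model.to(device)
#   # ...and inside generate_questions: inputs = inputs.to(device)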


def find_best_match_from_context(user_context, squad_data):
    """
    Find the closest record based on article.title (keeping the original
    code's logic). Returns a tuple (context_title, answer_text,
    question_text) or None.
    """
    best_score, best_entry = 0.0, None
    ui = user_context.lower()

    for article in squad_data.get("data", []):
        context_title = article.get("title", "")
        score_title = SequenceMatcher(None, ui, context_title.lower()).ratio()

        for paragraph in article.get("paragraphs", []):
            for qa in paragraph.get("qas", []):
                answers = qa.get("answers", [])
                if not answers:
                    continue
                answer_text = answers[0].get("text", "").strip()
                question_text = qa.get("question", "").strip()

                # The score depends only on the title similarity, so the
                # strict ">" keeps the first QA pair of the best-scoring
                # article.
                score = score_title
                if score > best_score:
                    best_score = score
                    best_entry = (context_title, answer_text, question_text)

    return best_entry
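
# Hypothetical usage of find_best_match_from_context (tiny SQuAD-style dict,
# invented for illustration):
#
#   demo = {"data": [{"title": "Hà Nội", "paragraphs": [{"qas": [
#       {"question": "Thủ đô của Việt Nam là gì?",
#        "answers": [{"text": "Hà Nội"}]}]}]}]}
#   find_best_match_from_context("Hà Nội là thủ đô của Việt Nam", demo)
#   # -> ("Hà Nội", "Hà Nội", "Thủ đô của Việt Nam là gì?")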


def _near_duplicate(q, seen, thr=0.90):
    """Filter near-duplicate questions by similarity ratio."""
    for s in seen:
        if SequenceMatcher(None, q, s).ratio() >= thr:
            return True
    return False
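
# Illustration with made-up strings: a near-identical question is flagged,
# a genuinely different one is not.
#
#   seen = ["Thủ đô của Việt Nam là gì?"]
#   _near_duplicate("Thủ đô của Việt Nam là gì ?", seen)  # True (ratio ~0.98)
#   _near_duplicate("Sông nào chảy qua Hà Nội?", seen)    # False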


def generate_questions(user_context,
                       total_questions=20,
                       batch_size=10,
                       top_k=60,
                       top_p=0.95,
                       temperature=0.9,
                       max_input_len=512,
                       max_new_tokens=64):
    with open(DATA_PATH, "r", encoding="utf-8") as f:
        squad_data = json.load(f)

    best_entry = find_best_match_from_context(user_context, squad_data)
    if best_entry is None:
        print("❌ No suitable data found in the JSON file.")
        return

    _, answer, _ = best_entry

    # T5 question-generation input format: the retrieved answer span plus the
    # user's context.
    input_text = f"answer: {answer} context: {user_context}"
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        truncation=True,
        max_length=max_input_len
    )
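
    # Note: with max_input_len=512, anything beyond 512 tokens of the combined
    # "answer: ... context: ..." string is truncated, so very long passages
    # lose their tail before generation.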

    # Sample in batches until enough unique questions are collected.
    unique_questions = []
    remaining = total_questions
    max_rounds = 20  # safety cap so all-duplicate batches cannot loop forever

    while remaining > 0 and max_rounds > 0:
        max_rounds -= 1
        n = min(batch_size, remaining)
        outputs = model.generate(
            **inputs,
            do_sample=True,
            top_k=top_k,
            top_p=top_p,
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            num_return_sequences=n,
            no_repeat_ngram_size=3,
            repetition_penalty=1.12
        )

        for out in outputs:
            q = tokenizer.decode(out, skip_special_tokens=True).strip()
            if len(q) < 5:  # drop degenerate, too-short outputs
                continue
            if not _near_duplicate(q, unique_questions, thr=0.90):
                unique_questions.append(q)

        remaining = total_questions - len(unique_questions)

    unique_questions = unique_questions[:total_questions]
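
    # Alternative decoding (an assumption, not in the original script): diverse
    # beam search (num_beams with num_beam_groups and diversity_penalty, and
    # do_sample=False) also spreads outputs apart, trading sampling randomness
    # for more deterministic variety.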

    print("✅ Newly generated questions:")
    for i, q in enumerate(unique_questions, 1):
        print(f"{i}. {q}")
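
# Possible extension (not in the original script): return unique_questions at
# the end of generate_questions so callers can reuse the list instead of only
# reading the printed output.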


if __name__ == "__main__":
    user_context = input("\nEnter a passage of text:\n ").strip()

    raw_n = input("\nEnter the number of questions you need: ").strip()
    if raw_n == "":
        total_questions = 20
    else:
        try:
            total_questions = int(raw_n)
        except ValueError:
            print("⚠️ Invalid value. Using the default of 20.")
            total_questions = 20

    # Clamp the request to a sane range.
    if total_questions < 1:
        total_questions = 1
    if total_questions > 200:
        print("⚠️ The maximum is 200 questions. Generating 200.")
        total_questions = 200

    # Equivalent to the original "10 if total_questions >= 30 else
    # min(10, total_questions)": cap the per-call batch at 10.
    batch_size = min(10, total_questions)

    print("\n🔍 Analyzing data...\n")
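
    # num_return_sequences is set per batch inside generate_questions, so each
    # generate() call decodes up to batch_size sequences at once; larger values
    # are faster but use more memory.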
    generate_questions(
        user_context=user_context,
        total_questions=total_questions,
        batch_size=batch_size,
        top_k=60,
        top_p=0.95,
        temperature=0.9,
        max_input_len=512,
        max_new_tokens=64
    )