DANGDOCAO committed on
Commit a384a74 · verified · 1 Parent(s): ed6a592
Files changed (1)
  1. HVU_QA/fine_tune_qg.py +153 -104
HVU_QA/fine_tune_qg.py CHANGED
@@ -1,104 +1,153 @@
- import json
- from datasets import Dataset
- from sklearn.model_selection import train_test_split
- from transformers import (
-     T5Tokenizer,
-     T5ForConditionalGeneration,
-     TrainingArguments,
-     Trainer
- )
-
- def load_squad_data(file_path):
-     # Flatten SQuAD-style JSON into (input, target) pairs for question generation.
-     with open(file_path, "r", encoding="utf-8") as f:
-         squad_data = json.load(f)
-
-     data = []
-     for article in squad_data["data"]:
-         for paragraph in article["paragraphs"]:
-             context = paragraph.get("context", "")
-             for qa in paragraph["qas"]:
-                 if not qa.get("is_impossible", False) and qa.get("answers"):
-                     answer = qa["answers"][0]["text"]
-                     question = qa["question"]
-                     input_text = f"answer: {answer} context: {context}"
-                     data.append({"input": input_text, "target": question})
-     return data
-
- def preprocess_function(example, tokenizer, max_input_length=512, max_target_length=64):
-     # Tokenize inputs and targets to fixed lengths; target token ids become the labels.
-     model_inputs = tokenizer(
-         example["input"],
-         max_length=max_input_length,
-         padding="max_length",
-         truncation=True,
-     )
-     labels = tokenizer(
-         text_target=example["target"],
-         max_length=max_target_length,
-         padding="max_length",
-         truncation=True,
-     )
-     model_inputs["labels"] = labels["input_ids"]
-     return model_inputs
-
- def main():
-     data_path = "30ktrain.json"
-     output_dir = "t5-viet-qg-finetuned"
-     logs_dir = "logs"
-     model_name = "VietAI/vit5-base"
-
-     print("Loading the model and tokenizer...")
-     tokenizer = T5Tokenizer.from_pretrained(model_name)
-     model = T5ForConditionalGeneration.from_pretrained(model_name)
-
-     print("Reading and splitting the data...")
-     raw_data = load_squad_data(data_path)
-     train_data, val_data = train_test_split(raw_data, test_size=0.2, random_state=42)
-
-     train_dataset = Dataset.from_list(train_data)
-     val_dataset = Dataset.from_list(val_data)
-
-     tokenized_train = train_dataset.map(
-         lambda x: preprocess_function(x, tokenizer),
-         batched=True,
-         remove_columns=["input", "target"]
-     )
-     tokenized_val = val_dataset.map(
-         lambda x: preprocess_function(x, tokenizer),
-         batched=True,
-         remove_columns=["input", "target"]
-     )
-
-     print("Configuring training...")
-     training_args = TrainingArguments(
-         output_dir=output_dir,
-         overwrite_output_dir=True,
-         per_device_train_batch_size=1,
-         gradient_accumulation_steps=1,
-         num_train_epochs=3,
-         learning_rate=2e-4,
-         weight_decay=0.01,
-         warmup_steps=0,
-         logging_dir=logs_dir,
-         logging_steps=10,
-         fp16=False
-     )
-
-     print("Training the model...")
-     trainer = Trainer(
-         model=model,
-         args=training_args,
-         train_dataset=tokenized_train,
-         eval_dataset=tokenized_val,
-         tokenizer=tokenizer,
-     )
-     trainer.train()
-
-     print("Saving the model...")
-     model.save_pretrained(output_dir)
-     tokenizer.save_pretrained(output_dir)
-     print("Training complete!")
-
- if __name__ == "__main__":
-     main()
+ import re
+ import torch
+ from difflib import SequenceMatcher
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
+ from transformers.utils import logging as hf_logging
+
+ hf_logging.set_verbosity_error()
+
+ MODEL_DIR = "t5-viet-qg-finetuned"
+
+ # Heuristic answer-span patterns: quoted phrases, clauses introduced by
+ # là/gồm/do/theo ("is/comprises/by/according to"), four-digit years, and
+ # legal references such as "Điều 5" (Article 5) or "Khoản 2" (Clause 2).
+ PATTERN = re.compile(
+     r"""
+     “([^”]{3,120})”
+     | "([^"]{3,120})"
+     | \b(?:là|gồm|do|theo)\s+([^,.;:\n]{3,120})
+     | \b\d{4}\b
+     | \b(?:Điều|Khoản)\s+\d+\b
+     """,
+     re.VERBOSE | re.IGNORECASE,
+ )
+
+
+ def norm(s: str) -> str:
+     return re.sub(r"\s+", " ", s).strip()
+
+
+ def is_dup(q: str, qs: list[str], thr: float = 0.85) -> bool:
+     # Fuzzy duplicate check against already-accepted questions.
+     ql = q.lower()
+     for x in qs:
+         if SequenceMatcher(None, ql, x.lower()).ratio() >= thr:
+             return True
+     return False
+
+
+ def extract_answers(ctx: str, max_n: int = 60) -> list[str]:
+     ctx = norm(ctx)
+     answers, seen = [], set()
+
+     for m in PATTERN.finditer(ctx):
+         for g in m.groups():
+             if not g:
+                 continue
+             g = norm(g)
+             k = g.lower()
+             if 3 <= len(g) <= 120 and k not in seen:
+                 seen.add(k)
+                 answers.append(g)
+                 if len(answers) >= max_n:
+                     return answers
+
+     # Fallback: too few pattern hits, so slice fixed-width chunks from the
+     # start of the context to use as answer candidates.
+     if len(answers) < 8:
+         for i in range(0, min(len(ctx), 500), 60):
+             ch = norm(ctx[i : i + 60])
+             k = ch.lower()
+             if len(ch) >= 15 and k not in seen:
+                 seen.add(k)
+                 answers.append(ch)
+                 if len(answers) >= max_n:
+                     break
+
+     if not answers and ctx:
+         answers = [ctx[:120]]
+
+     return answers
+
+
+ def load_model():
+     tok = T5Tokenizer.from_pretrained(MODEL_DIR)
+     mdl = T5ForConditionalGeneration.from_pretrained(MODEL_DIR)
+
+     dev = "cuda" if torch.cuda.is_available() else "cpu"
+     try:
+         mdl = mdl.to(dev)
+     except RuntimeError:
+         # e.g. CUDA out of memory: fall back to CPU.
+         dev = "cpu"
+         try:
+             torch.cuda.empty_cache()
+         except Exception:
+             pass
+         mdl = mdl.to(dev)
+
+     mdl.eval()
+     return tok, mdl, dev
+
+
+ # Loaded once at import time so the CLI below can generate immediately.
+ tokenizer, model, device = load_model()
+
+
+ def generate_questions(context: str, n: int = 20) -> list[str]:
+     ctx = norm(context)
+     answers = extract_answers(ctx, max_n=80)
+     questions = []
+
+     gen_cfg = dict(
+         do_sample=True,
+         top_k=80,
+         top_p=0.98,
+         temperature=1.05,
+         max_new_tokens=72,
+         no_repeat_ngram_size=3,
+         repetition_penalty=1.08,
+     )
+
+     num_ret = 8 if n <= 20 else 10
+
+     def run_prompt(ans: str, rounds: int):
+         prompt = f"answer: {ans}\ncontext: {ctx}\nquestion:"
+         inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
+
+         for _ in range(rounds):
+             outs = model.generate(**inputs, num_return_sequences=num_ret, **gen_cfg)
+             added = 0
+             for o in outs:
+                 q = norm(tokenizer.decode(o, skip_special_tokens=True))
+                 if not q:
+                     continue
+                 if not q.endswith("?"):
+                     q += "?"
+                 if len(q) >= 6 and not is_dup(q, questions, thr=0.85):
+                     questions.append(q)
+                     added += 1
+                 if len(questions) >= n:
+                     return
+             if added == 0:
+                 # A round produced nothing new: sample more aggressively.
+                 gen_cfg["temperature"] = min(1.25, gen_cfg["temperature"] + 0.05)
+                 gen_cfg["top_p"] = min(0.995, gen_cfg["top_p"] + 0.005)
+
+     with torch.inference_mode():
+         for ans in answers:
+             if len(questions) >= n:
+                 break
+             run_prompt(ans, rounds=6)
+
+         # Last resort: prompt with the opening of the context itself.
+         if len(questions) < n:
+             run_prompt(ctx[:120], rounds=12)
+
+     return questions[:n]
+
+
+ if __name__ == "__main__":
+     ctx = input("\nEnter a passage of text:\n").strip()
+     try:
+         n = int(input("\nEnter the number of questions to generate: ").strip() or "20")
+     except ValueError:
+         n = 20
+
+     n = max(1, min(n, 200))
+     qs = generate_questions(ctx, n)
+
+     print("\nGenerated questions:")
+     for i, q in enumerate(qs, 1):
+         print(f"{i}. {q}")
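
Beyond the interactive prompt in `__main__`, the new module can also be used programmatically. A minimal sketch, assuming the fine-tuned checkpoint exists at `t5-viet-qg-finetuned` and that `HVU_QA` is importable as a package (e.g. contains an `__init__.py`); note that the import itself loads the model, because `load_model()` runs at module level. The sample passage is purely illustrative:

    # Hypothetical usage; importing fine_tune_qg triggers load_model() immediately.
    from HVU_QA.fine_tune_qg import generate_questions

    # Illustrative Vietnamese passage, roughly: "Under Article 5 of the
    # regulations, students must complete at least 120 credits to be
    # considered for graduation."
    context = (
        "Theo Điều 5 của quy chế, sinh viên phải hoàn thành ít nhất 120 tín chỉ "
        "để được xét tốt nghiệp."
    )

    for i, q in enumerate(generate_questions(context, n=5), 1):
        print(f"{i}. {q}")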