DANGDOCAO committed on
Commit 7ba0550 · verified · 1 Parent(s): 918c6bd

Delete fine_tune_qg.py

Files changed (1)
  1. fine_tune_qg.py +0 -102
fine_tune_qg.py DELETED
@@ -1,102 +0,0 @@
- import json
- from datasets import Dataset
- from sklearn.model_selection import train_test_split
- from transformers import (
-     T5Tokenizer,
-     T5ForConditionalGeneration,
-     TrainingArguments,
-     Trainer
- )
-
- def load_squad_data(file_path):
-     """Read a SQuAD-style JSON file and build (input, target) pairs for question generation."""
-     with open(file_path, "r", encoding="utf-8") as f:
-         squad_data = json.load(f)
-
-     data = []
-     for article in squad_data["data"]:
-         for paragraph in article["paragraphs"]:
-             # In the SQuAD layout the passage text is stored on the paragraph, not the article title.
-             context = paragraph.get("context", "")
-             for qa in paragraph["qas"]:
-                 # Skip unanswerable questions and questions without an answer span.
-                 if not qa.get("is_impossible", False) and qa.get("answers"):
-                     answer = qa["answers"][0]["text"]
-                     question = qa["question"]
-                     input_text = f"answer: {answer} context: {context}"
-                     data.append({"input": input_text, "target": question})
-     return data
-
- def preprocess_function(example, tokenizer, max_input_length=512, max_target_length=64):
-     """Tokenize inputs and targets, padding/truncating to fixed lengths."""
-     model_inputs = tokenizer(
-         example["input"],
-         max_length=max_input_length,
-         padding="max_length",
-         truncation=True,
-     )
-     labels = tokenizer(
-         text_target=example["target"],
-         max_length=max_target_length,
-         padding="max_length",
-         truncation=True,
-     )
-     # Replace padding token ids in the labels with -100 so they are ignored by the loss.
-     labels["input_ids"] = [
-         [(tok if tok != tokenizer.pad_token_id else -100) for tok in seq]
-         for seq in labels["input_ids"]
-     ]
-     model_inputs["labels"] = labels["input_ids"]
-     return model_inputs
-
- def main():
-     data_path = "30ktrain.json"
-     output_dir = "t5-viet-qg-finetuned"
-     logs_dir = "logs"
-     model_name = "VietAI/vit5-base"
-
-     print("📥 Loading the model and tokenizer...")
-     tokenizer = T5Tokenizer.from_pretrained(model_name)
-     model = T5ForConditionalGeneration.from_pretrained(model_name)
-
-     print("📚 Reading and splitting the data...")
-     raw_data = load_squad_data(data_path)
-     train_data, val_data = train_test_split(raw_data, test_size=0.2, random_state=42)
-
-     train_dataset = Dataset.from_list(train_data)
-     val_dataset = Dataset.from_list(val_data)
-
-     tokenized_train = train_dataset.map(
-         lambda x: preprocess_function(x, tokenizer),
-         batched=True,
-         remove_columns=["input", "target"]
-     )
-     tokenized_val = val_dataset.map(
-         lambda x: preprocess_function(x, tokenizer),
-         batched=True,
-         remove_columns=["input", "target"]
-     )
-
-     print("⚙️ Configuring training...")
-     training_args = TrainingArguments(
-         output_dir=output_dir,
-         overwrite_output_dir=True,
-         per_device_train_batch_size=1,
-         gradient_accumulation_steps=1,
-         num_train_epochs=3,
-         learning_rate=2e-4,
-         weight_decay=0.01,
-         warmup_steps=0,
-         logging_dir=logs_dir,
-         logging_steps=10,
-         fp16=False
-     )
-
-     print("🚀 Training the model...")
-     trainer = Trainer(
-         model=model,
-         args=training_args,
-         train_dataset=tokenized_train,
-         eval_dataset=tokenized_val,  # still passed so evaluation can be run manually later
-         tokenizer=tokenizer,
-     )
-     trainer.train()
-
-     print("💾 Saving the model...")
-     model.save_pretrained(output_dir)
-     tokenizer.save_pretrained(output_dir)
-     print("✅ Training complete!")
-
- if __name__ == "__main__":
-     main()
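
For reference, the deleted script saved its checkpoint to t5-viet-qg-finetuned. A minimal generation sketch against such a checkpoint might look like the following; the model directory, the sample answer/context pair, and the decoding parameters are illustrative assumptions, and only the "answer: ... context: ..." input format is taken from the script itself.

from transformers import T5Tokenizer, T5ForConditionalGeneration

# Path where the deleted script saved its fine-tuned model (assumed to exist locally).
model_dir = "t5-viet-qg-finetuned"
tokenizer = T5Tokenizer.from_pretrained(model_dir)
model = T5ForConditionalGeneration.from_pretrained(model_dir)

# Hypothetical answer/context pair, formatted the same way the training inputs were built
# ("Hà Nội là thủ đô của Việt Nam." = "Hanoi is the capital of Vietnam.").
text = "answer: Hà Nội context: Hà Nội là thủ đô của Việt Nam."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)

# Beam-search decoding; these generation settings are illustrative defaults, not from the repo.
outputs = model.generate(**inputs, max_length=64, num_beams=4, early_stopping=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))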