File size: 2,298 Bytes
ba61e47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
"""
create_cloze_qa_dataset.py
-------------------------------------
Generate Cloze-style QA dataset from WikiText-2.
Each sentence produces one 'fill-in-the-blank' question
with a single correct answer.

Output: JSONL files for train / validation / test.
"""

from datasets import load_dataset
import re
import json
from pathlib import Path
import random

# Fetch the raw (untokenized) WikiText-2 corpus from the HF hub.
print("🔹 Loading WikiText-2 dataset ...")
dataset = load_dataset("wikitext", "wikitext-2-raw-v1")

# Directory that will hold one JSONL file per split.
output_dir = Path("cloze_qa_dataset")
output_dir.mkdir(parents=True, exist_ok=True)

def create_cloze_question(sentence: str):
    """
    Convert a sentence into a Cloze-style question by masking one entity/keyword.

    Picks a random capitalized word (a crude named-entity heuristic) of at
    least 3 characters and replaces its first *whole-word* occurrence with
    "____".

    Returns:
        (question, answer) tuple of stripped strings, or None if the
        sentence has no suitable word to mask.
    """
    # Candidate answers: capitalized words of length >= 3.  Filtering by
    # length *before* the random pick avoids discarding a sentence just
    # because the pick happened to land on a short word like "It" or "An"
    # while longer valid candidates were available.
    candidates = [w for w in re.findall(r"\b[A-Z][a-zA-Z]+\b", sentence)
                  if len(w) >= 3]
    if not candidates:
        return None

    answer = random.choice(candidates)
    # Mask with a word-boundary regex: plain str.replace would hit the
    # answer as a substring of a longer token (e.g. "Ant" inside "xAnt")
    # and blank out the wrong text.
    question = re.sub(rf"\b{re.escape(answer)}\b", "____", sentence, count=1)
    if question == sentence:
        return None

    return question.strip(), answer.strip()

def generate_qa_split(split_name, data):
    """
    Write one JSONL file of cloze QA pairs for the given dataset split.

    Each non-empty text entry is split into sentences on terminal
    punctuation; every sentence that yields a cloze question becomes one
    JSON record in <output_dir>/<split_name>.jsonl.
    """
    output_path = output_dir / f"{split_name}.jsonl"
    count = 0
    with open(output_path, "w", encoding="utf-8") as out_file:
        for doc_id, text in enumerate(data["text"]):
            stripped = text.strip()
            if not stripped:
                continue  # skip blank corpus lines
            for sent_id, sentence in enumerate(re.split(r'(?<=[.!?]) +', stripped)):
                qa = create_cloze_question(sentence)
                if qa is None:
                    continue  # sentence had nothing suitable to mask
                question, answer = qa
                record = {
                    "doc_id": doc_id,
                    "sent_id": sent_id,
                    "title": None,
                    "question": question,
                    "answer": answer,
                }
                out_file.write(json.dumps(record, ensure_ascii=False) + "\n")
                count += 1
        print(f" Saved {count} QA pairs to {output_path}")

# Generate datasets — one JSONL file per corpus split.
for split_name in ("train", "validation", "test"):
    generate_qa_split(split_name, dataset[split_name])

print("\nAll splits processed and saved successfully")