File size: 8,588 Bytes
d1693da
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
"""
Fetch data from HuggingFace dataset undertheseanlp/UVB-v0.1
- Get 5,000 high-quality sentences from fiction books
- Get 5,000 high-quality sentences from non-fiction books
"""

import re
from os.path import dirname, join

from datasets import load_dataset
from underthesea import sent_tokenize, text_normalize


# Genre tags that mark a book as fiction.  Matching in classify_book() is
# exact set membership against the dataset's "genres" field, so spellings
# must match the dataset exactly — TODO confirm tag spellings against
# the actual UVB-v0.1 genre vocabulary.
FICTION_GENRES = {
    "Fiction", "Novels", "Romance", "Fantasy", "Science Fiction",
    "Mystery", "Thriller", "Horror", "Historical Fiction", "Literary Fiction",
    "Adventure", "Crime", "Suspense", "Drama", "Short Stories"
}

# Genre tags that mark a book as non-fiction.  Same exact-membership
# caveat as FICTION_GENRES above.
NON_FICTION_GENRES = {
    "Non Fiction", "Nonfiction", "History", "Biography", "Autobiography",
    "Self Help", "Psychology", "Philosophy", "Science", "Politics",
    "Economics", "Business", "Education", "Travel", "Memoir",
    "Essays", "Reference", "Health", "Religion", "Spirituality"
}


def clean_text(text):
    """Strip markdown formatting from *text* and return normalized plain text.

    The text is first Unicode-normalized via underthesea's text_normalize,
    then a fixed sequence of regex substitutions removes markdown artifacts,
    and finally every line is stripped of leading/trailing whitespace.
    """
    # (pattern, replacement, flags) applied in order — order matters:
    # blank-line collapsing must run after the per-line removals.
    substitutions = (
        (r'^#+\s+', '', re.MULTILINE),           # markdown headers
        (r'\*+', '', 0),                          # bold/italic markers
        (r'^-+$', '', re.MULTILINE),              # horizontal rules
        (r'\[([^\]]+)\]\([^)]+\)', r'\1', 0),    # links -> link text
        (r'\n{2,}', '\n', 0),                     # collapse blank lines
    )

    cleaned = text_normalize(text)
    for pattern, replacement, flags in substitutions:
        cleaned = re.sub(pattern, replacement, cleaned, flags=flags)

    # Trim whitespace line by line.
    return '\n'.join(part.strip() for part in cleaned.split('\n'))


def is_high_quality_sentence(sent):
    """Filter a candidate sentence for UD treebank annotation.

    Returns a ``(is_valid, stripped_sentence)`` tuple: ``is_valid`` is True
    only when the stripped sentence passes every heuristic below; the
    stripped text is returned either way so the caller can reuse it.
    """
    sent = sent.strip()

    if not sent:
        return False, sent

    # Length constraints (characters)
    if len(sent) < 30:  # Minimum length for meaningful sentence
        return False, sent
    if len(sent) > 250:  # Maximum length
        return False, sent

    # Word count constraints (whitespace tokens, not Vietnamese word segments)
    words = sent.split()
    if len(words) < 5:  # At least 5 words
        return False, sent
    if len(words) > 40:  # Max 40 words
        return False, sent

    # Must start with uppercase letter (proper sentence)
    if not sent[0].isupper():
        return False, sent

    # Must end with proper punctuation
    if not sent.rstrip()[-1] in '.!?…"»':
        return False, sent

    # Skip if mostly uppercase (headers, titles): >30% uppercase characters
    if sum(1 for c in sent if c.isupper()) > len(sent) * 0.3:
        return False, sent

    # Must contain Vietnamese characters (diacritic vowels or đ)
    if not re.search(r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợùúủũụưứừửữựỳýỷỹỵđ]', sent, re.IGNORECASE):
        return False, sent

    # Skip sentences with too many numbers (tables, lists): >15% digits
    num_digits = sum(1 for c in sent if c.isdigit())
    if num_digits > len(sent) * 0.15:
        return False, sent

    # Skip structural headings: chapter/part/section/article markers
    # (Chương/Phần/Mục/Điều) and numbered or lettered list items
    if re.match(r'^(Chương|Phần|Mục|Điều|\d+\.|\([a-z]\))', sent):
        return False, sent

    # Skip sentences with URLs or emails
    if re.search(r'(http|www\.|@|\.com|\.vn)', sent, re.IGNORECASE):
        return False, sent

    # Skip sentences with excessive punctuation (> 1.5 marks per word)
    punct_count = sum(1 for c in sent if c in '.,;:!?-–—()[]{}""\'\'«»')
    if punct_count > len(words) * 1.5:
        return False, sent

    # Skip incomplete sentences (ellipsis anywhere except the last 5 chars)
    if '...' in sent[:-5]:
        return False, sent

    # Skip dialogue-heavy sentences (too many quotes)
    # NOTE(review): the three count() arguments are intended to be the
    # straight, left-curly, and right-curly double-quote characters —
    # verify they are three distinct codepoints, otherwise the same
    # character is counted multiple times.
    quote_count = sent.count('"') + sent.count('"') + sent.count('"')
    if quote_count > 4:
        return False, sent

    return True, sent


def classify_book(genres):
    """Label a book "fiction" or "non-fiction" from its genre tag list.

    Returns None when the genre list is empty or matches neither genre
    set.  When tags from both sets are present, the side with more
    matching tags wins; a tie goes to "non-fiction".
    """
    if not genres:
        return None

    tags = set(genres)
    fiction_hits = tags & FICTION_GENRES
    non_fiction_hits = tags & NON_FICTION_GENRES

    # No overlap with either vocabulary — unclassifiable.
    if not fiction_hits and not non_fiction_hits:
        return None

    # Strictly more fiction tags -> fiction; otherwise (including ties)
    # non-fiction, matching the original tie-breaking behavior.
    if len(fiction_hits) > len(non_fiction_hits):
        return "fiction"
    return "non-fiction"


def extract_sentences_from_book(content, max_sentences=500):
    """Return up to *max_sentences* high-quality sentences from book text.

    The content is cleaned of markdown, split into sentences with
    underthesea's sent_tokenize, and each sentence is screened through
    is_high_quality_sentence; extraction stops once the cap is reached.
    """
    picked = []
    for raw in sent_tokenize(clean_text(content)):
        ok, cleaned = is_high_quality_sentence(raw)
        if not ok:
            continue
        picked.append(cleaned)
        if len(picked) >= max_sentences:
            break
    return picked


def _collect_sentences(books, limit=5000):
    """Collect up to *limit* high-quality sentences from a ranked book list.

    Iterates books in order (callers pass them pre-sorted by quality
    score), extracts sentences from each, and stops as soon as *limit*
    sentences have been gathered.  Logs per-book progress to stdout.
    """
    collected = []
    for i, book in enumerate(books):
        if len(collected) >= limit:
            break
        sentences = extract_sentences_from_book(book["content"])
        for sent in sentences:
            if len(collected) >= limit:
                break
            collected.append(sent)
        print(f"  [{i+1}/{len(books)}] {book['title'][:50]} - {len(sentences)} sentences (total: {len(collected)})")
    return collected


def fetch_and_process():
    """Download the UVB-v0.1 corpus, select 5,000 fiction and 5,000
    non-fiction sentences, and write them to sentences_uvb.txt.

    Output format: one sentence per line, tab-separated as
    ``index<TAB>source<TAB>sentence`` where source is "fiction" or
    "non-fiction" (all fiction rows come first).
    """
    print("Loading UVB-v0.1 dataset from HuggingFace...")
    ds = load_dataset("undertheseanlp/UVB-v0.1", split="train")

    print(f"Total books in dataset: {len(ds)}")

    # Split the corpus into fiction / non-fiction buckets.
    fiction_books = []
    non_fiction_books = []

    for book in ds:
        genres = book.get("genres", [])
        # Missing ratings may come back as None; coerce to 0 so the
        # arithmetic below is safe.
        rating = book.get("goodreads_rating", 0) or 0
        num_ratings = book.get("goodreads_num_ratings", 0) or 0

        # Weight the rating by rating volume (capped), so a 5-star book
        # with a handful of ratings doesn't outrank a slightly lower-rated
        # book with thousands of ratings.
        quality_score = rating * min(num_ratings / 100, 10)

        book_type = classify_book(genres)
        book_info = {
            "title": book["title"],
            "content": book["content"],
            "rating": rating,
            "num_ratings": num_ratings,
            "quality_score": quality_score,
            "genres": genres
        }

        if book_type == "fiction":
            fiction_books.append(book_info)
        elif book_type == "non-fiction":
            non_fiction_books.append(book_info)

    print(f"Fiction books: {len(fiction_books)}")
    print(f"Non-fiction books: {len(non_fiction_books)}")

    # Process the best-rated books first.
    fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)
    non_fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)

    print("\nExtracting sentences from fiction books...")
    fiction_sentences = _collect_sentences(fiction_books)

    print("\nExtracting sentences from non-fiction books...")
    non_fiction_sentences = _collect_sentences(non_fiction_books)

    print(f"\nFiction sentences collected: {len(fiction_sentences)}")
    print(f"Non-fiction sentences collected: {len(non_fiction_sentences)}")

    # _collect_sentences already caps each side at 5,000; the slices are
    # kept as a defensive guard on the advertised contract.
    fiction_sentences = fiction_sentences[:5000]
    non_fiction_sentences = non_fiction_sentences[:5000]
    all_sentences = fiction_sentences + non_fiction_sentences
    print(f"Total sentences: {len(all_sentences)}")

    # Write the output file into this script's parent directory.
    output_dir = dirname(dirname(__file__))
    output_file = join(output_dir, "sentences_uvb.txt")

    with open(output_file, "w", encoding="utf-8") as f:
        for i, sent in enumerate(all_sentences, 1):
            # Rows 1..len(fiction_sentences) are fiction, the rest non-fiction.
            source = "fiction" if i <= len(fiction_sentences) else "non-fiction"
            f.write(f"{i}\t{source}\t{sent}\n")

    print(f"\nSaved to: {output_file}")

    # Show a few samples from each bucket for a quick sanity check.
    print("\nSample fiction sentences:")
    for i, sent in enumerate(fiction_sentences[:3], 1):
        print(f"  {i}. {sent[:100]}...")

    print("\nSample non-fiction sentences:")
    for i, sent in enumerate(non_fiction_sentences[:3], 1):
        print(f"  {i}. {sent[:100]}...")


if __name__ == "__main__":
    fetch_and_process()