# UDD-1/scripts/fetch_uvb_data.py
"""
Fetch data from HuggingFace dataset undertheseanlp/UVB-v0.1
- Get 5,000 high-quality sentences from fiction books
- Get 5,000 high-quality sentences from non-fiction books
"""
import re
from os.path import dirname, join
from datasets import load_dataset
from underthesea import sent_tokenize, text_normalize
# Genre tags (as they appear in the dataset's "genres" field) that mark a
# book as fiction for classification purposes.
FICTION_GENRES = {
    "Fiction", "Novels", "Romance", "Fantasy", "Science Fiction",
    "Mystery", "Thriller", "Horror", "Historical Fiction", "Literary Fiction",
    "Adventure", "Crime", "Suspense", "Drama", "Short Stories"
}
# Genre tags that mark a book as non-fiction; a book tagged with both kinds
# is resolved by majority vote in classify_book().
NON_FICTION_GENRES = {
    "Non Fiction", "Nonfiction", "History", "Biography", "Autobiography",
    "Self Help", "Psychology", "Philosophy", "Science", "Politics",
    "Economics", "Business", "Education", "Travel", "Memoir",
    "Essays", "Reference", "Health", "Religion", "Spirituality"
}
def clean_text(text):
    """Strip markdown formatting from *text* and normalize its whitespace.

    Runs underthesea's Unicode normalization first, then applies a fixed
    sequence of regex substitutions, and finally trims every line.
    """
    # Normalize Unicode (diacritic composition etc.) via underthesea.
    text = text_normalize(text)
    # (pattern, replacement, flags) applied in order.
    substitutions = (
        (r'^#+\s+', '', re.MULTILINE),          # markdown headers
        (r'\*+', '', 0),                        # bold/italic markers
        (r'^-+$', '', re.MULTILINE),            # horizontal rules
        (r'\[([^\]]+)\]\([^)]+\)', r'\1', 0),   # links -> link text
        (r'\n{2,}', '\n', 0),                   # collapse blank lines
    )
    for pattern, replacement, flags in substitutions:
        text = re.sub(pattern, replacement, text, flags=flags)
    # Strip leading/trailing whitespace on every line.
    return '\n'.join(line.strip() for line in text.split('\n'))
# Compiled once at import time; these run on every candidate sentence.
_VIETNAMESE_CHARS = re.compile(
    r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợ'
    r'ùúủũụưứừửữựỳýỷỹỵđ]',
    re.IGNORECASE,
)
_HEADING_PREFIX = re.compile(r'^(Chương|Phần|Mục|Điều|\d+\.|\([a-z]\))')
_URL_OR_EMAIL = re.compile(r'(http|www\.|@|\.com|\.vn)', re.IGNORECASE)
# Straight and curly double quotes, each counted once for the dialogue filter.
_QUOTE_CHARS = ('"', '\u201c', '\u201d')
# Punctuation inventory for the excessive-punctuation filter (straight and
# curly quote forms included once each).
_PUNCT_CHARS = set('.,;:!?-–—()[]{}"\'\u201c\u201d\u2018\u2019«»')
# Accepted sentence-final characters.
_END_PUNCT = '.!?…"\u201d»'


def is_high_quality_sentence(sent):
    """Check whether *sent* is high quality for UD annotation.

    Applies length, word-count, casing, punctuation, digit-density and
    content filters tuned for Vietnamese book text.

    Returns:
        tuple (is_valid: bool, cleaned: str) where ``cleaned`` is the
        stripped sentence, returned regardless of validity.
    """
    sent = sent.strip()
    if not sent:
        return False, sent
    # Length constraints: long enough to be meaningful, short enough to annotate.
    if len(sent) < 30 or len(sent) > 250:
        return False, sent
    # Word count constraints: 5-40 tokens.
    words = sent.split()
    if len(words) < 5 or len(words) > 40:
        return False, sent
    # Proper sentence shape: capitalized start and terminal punctuation.
    if not sent[0].isupper():
        return False, sent
    if sent.rstrip()[-1] not in _END_PUNCT:
        return False, sent
    # Mostly-uppercase text is usually a header or title.
    if sum(1 for c in sent if c.isupper()) > len(sent) * 0.3:
        return False, sent
    # Must actually contain Vietnamese diacritics.
    if not _VIETNAMESE_CHARS.search(sent):
        return False, sent
    # Digit-heavy text is usually a table or list entry.
    num_digits = sum(1 for c in sent if c.isdigit())
    if num_digits > len(sent) * 0.15:
        return False, sent
    # Chapter/section headings and enumerated items.
    if _HEADING_PREFIX.match(sent):
        return False, sent
    # URLs and email-like fragments.
    if _URL_OR_EMAIL.search(sent):
        return False, sent
    # Excessive punctuation relative to word count.
    punct_count = sum(1 for c in sent if c in _PUNCT_CHARS)
    if punct_count > len(words) * 1.5:
        return False, sent
    # Ellipsis before the tail suggests a truncated sentence.
    if '...' in sent[:-5]:
        return False, sent
    # Dialogue-heavy sentences. Bug fix: the original summed sent.count('"')
    # three times (the curly-quote characters had been mangled into straight
    # quotes), so any sentence with just two quotes counted as six and was
    # rejected. Count straight and curly double quotes once each instead.
    quote_count = sum(sent.count(q) for q in _QUOTE_CHARS)
    if quote_count > 4:
        return False, sent
    return True, sent
def classify_book(genres):
    """Label a book "fiction" or "non-fiction" from its genre tags.

    Returns None when *genres* is empty or matches neither genre set.
    """
    if not genres:
        return None
    tags = set(genres)
    fiction_hits = tags & FICTION_GENRES
    non_fiction_hits = tags & NON_FICTION_GENRES
    if fiction_hits and non_fiction_hits:
        # Mixed tagging: majority vote, ties resolved to non-fiction.
        if len(fiction_hits) > len(non_fiction_hits):
            return "fiction"
        return "non-fiction"
    if fiction_hits:
        return "fiction"
    if non_fiction_hits:
        return "non-fiction"
    return None
def extract_sentences_from_book(content, max_sentences=500):
    """Extract up to *max_sentences* high-quality sentences from *content*."""
    kept = []
    for raw in sent_tokenize(clean_text(content)):
        ok, cleaned = is_high_quality_sentence(raw)
        if not ok:
            continue
        kept.append(cleaned)
        # Stop as soon as the per-book quota is reached.
        if len(kept) >= max_sentences:
            break
    return kept
def _partition_books(ds):
    """Split dataset rows into (fiction, non_fiction) book dicts, best-rated first.

    Each entry keeps title/content plus a popularity-weighted quality score
    used for ordering.
    """
    fiction_books = []
    non_fiction_books = []
    for book in ds:
        genres = book.get("genres", [])
        rating = book.get("goodreads_rating", 0) or 0
        num_ratings = book.get("goodreads_num_ratings", 0) or 0
        # Weight the rating by (capped) review volume so a handful of 5-star
        # reviews doesn't outrank consistently well-reviewed books.
        quality_score = rating * min(num_ratings / 100, 10)
        book_info = {
            "title": book["title"],
            "content": book["content"],
            "rating": rating,
            "num_ratings": num_ratings,
            "quality_score": quality_score,
            "genres": genres,
        }
        book_type = classify_book(genres)
        if book_type == "fiction":
            fiction_books.append(book_info)
        elif book_type == "non-fiction":
            non_fiction_books.append(book_info)
    # Highest quality first so the extraction loop drains the best books.
    fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)
    non_fiction_books.sort(key=lambda x: x["quality_score"], reverse=True)
    return fiction_books, non_fiction_books


def _collect_sentences(books, target=5000):
    """Collect up to *target* high-quality sentences, walking *books* in order."""
    collected = []
    for i, book in enumerate(books):
        if len(collected) >= target:
            break
        sentences = extract_sentences_from_book(book["content"])
        # Take only as many as still fit under the target.
        collected.extend(sentences[:target - len(collected)])
        print(f"  [{i+1}/{len(books)}] {book['title'][:50]} - {len(sentences)} sentences (total: {len(collected)})")
    return collected


def fetch_and_process():
    """Build sentences_uvb.txt: 5,000 fiction + 5,000 non-fiction sentences.

    Downloads undertheseanlp/UVB-v0.1, classifies books by genre, extracts
    high-quality sentences from the best-rated books of each kind, and
    writes a TSV (index, source, sentence) into this script's parent
    directory. The previous version duplicated the extraction loop for the
    two categories; it now runs through _collect_sentences once per category.
    """
    print("Loading UVB-v0.1 dataset from HuggingFace...")
    ds = load_dataset("undertheseanlp/UVB-v0.1", split="train")
    print(f"Total books in dataset: {len(ds)}")

    fiction_books, non_fiction_books = _partition_books(ds)
    print(f"Fiction books: {len(fiction_books)}")
    print(f"Non-fiction books: {len(non_fiction_books)}")

    print("\nExtracting sentences from fiction books...")
    fiction_sentences = _collect_sentences(fiction_books)
    print("\nExtracting sentences from non-fiction books...")
    non_fiction_sentences = _collect_sentences(non_fiction_books)

    print(f"\nFiction sentences collected: {len(fiction_sentences)}")
    print(f"Non-fiction sentences collected: {len(non_fiction_sentences)}")

    fiction_part = fiction_sentences[:5000]
    non_fiction_part = non_fiction_sentences[:5000]
    all_sentences = fiction_part + non_fiction_part
    print(f"Total sentences: {len(all_sentences)}")

    # Write a TSV: running index, source label, sentence text.
    output_dir = dirname(dirname(__file__))
    output_file = join(output_dir, "sentences_uvb.txt")
    with open(output_file, "w", encoding="utf-8") as f:
        for i, sent in enumerate(all_sentences, 1):
            # Fiction sentences occupy the first len(fiction_part) rows.
            source = "fiction" if i <= len(fiction_part) else "non-fiction"
            f.write(f"{i}\t{source}\t{sent}\n")
    print(f"\nSaved to: {output_file}")

    # Print a few samples from each category for a quick sanity check.
    print("\nSample fiction sentences:")
    for i, sent in enumerate(fiction_part[:3], 1):
        print(f"  {i}. {sent[:100]}...")
    print("\nSample non-fiction sentences:")
    for i, sent in enumerate(non_fiction_part[:3], 1):
        print(f"  {i}. {sent[:100]}...")
# Script entry point: run the full fetch/filter/export pipeline.
if __name__ == "__main__":
    fetch_and_process()