# Source: UVB-v0.1 / scripts/map_goodreads.py (commit f2bc6cb, "Add processing scripts", by rain1024)
#!/usr/bin/env python3
"""Map Vietnamese fiction books to Goodreads entries.
Uses STREAMING mode and EXACT MATCHING ONLY for speed.
Fuzzy matching is too slow for large datasets.
Usage:
python map_goodreads.py --output mappings.jsonl
Requirements:
pip install datasets tqdm
"""
import argparse
import json
import re
import unicodedata
from pathlib import Path
from datasets import load_dataset
from rapidfuzz import fuzz
from tqdm import tqdm
def normalize_text(text: str) -> str:
    """Normalize text for matching: lowercase, strip diacritics, drop punctuation.

    Vietnamese "đ"/"Đ" are standalone letters (U+0111/U+0110), not base
    letter + combining mark, so NFD decomposition leaves them untouched;
    without an explicit mapping, "Đàn" would normalize to "đan" and never
    match a romanized Goodreads title spelled "dan". Map them to "d" first.
    """
    if not text:
        return ""
    text = text.lower().replace("đ", "d")
    text = unicodedata.normalize("NFD", text)
    # Drop combining marks (category Mn) exposed by NFD: "hoàng" -> "hoang".
    text = "".join(c for c in text if unicodedata.category(c) != "Mn")
    # Punctuation -> spaces, then collapse runs of whitespace.
    text = re.sub(r"[^\w\s]", " ", text)
    text = re.sub(r"\s+", " ", text).strip()
    return text
def clean_author(author: str) -> str:
    """Strip trailing metadata (markers, punctuation, years) from an author string."""
    if not author:
        return ""
    # Everything from the first metadata keyword onward is noise; keep the head.
    marker_re = (
        r'\s+(?:LỜI|MỤC|CHƯƠNG|Phần|Dẫn|Nguồn|Bản quyền|Xuất bản|Người dịch|'
        r'Cung cấp|Phát hành|http|www\.|Thể loại|First published|FREE|eBook|\*)'
    )
    head = re.split(marker_re, author, flags=re.IGNORECASE)[0].strip()
    # Drop trailing punctuation, then any dangling 4-digit year like " 1941.".
    head = re.sub(r'[\*\:\.\,]+$', '', head).strip()
    head = re.sub(r'\s+\d{4}\.?$', '', head).strip()
    return head
def clean_title(title: str) -> str:
    """Remove ebook-project boilerplate and translator credits from a title."""
    if not title:
        return ""
    # Boilerplate suffixes injected by ebook projects; each cut runs to end of string.
    boilerplate = (
        r'\s*Chào mừng các bạn đón đọc.*$',
        r'\s*Với minh họa của chính tác giả.*$',
        r'\s*Nhà xuất bản.*$',
        r'\s*Chuyển sang ấn bản điện tử.*$',
        r'\s*Nguyên tác:.*$',
        r'\s*Thực hiện ebook.*$',
        r'\s*Original title:.*$',
    )
    for pat in boilerplate:
        title = re.sub(pat, '', title, flags=re.IGNORECASE)
    # Translator credits in parentheses: "(Bùi Giáng dịch)", "(Dịch giả: X)".
    title = re.sub(r'\s*\([^)]*dịch[^)]*\)', '', title, flags=re.IGNORECASE)
    title = re.sub(r'\s*\(Dịch giả[^)]*\)', '', title, flags=re.IGNORECASE)
    return title.strip()
def is_western_name(text: str) -> bool:
    """Check if text looks like a Western author name.

    Matches title-cased names with optional particles, initials, and
    hyphenated compounds, e.g. "Antoine de Saint-Exupéry", "J.K. Rowling",
    "Leo Tolstoy".

    Fixes two defects in the previous regex, which failed its own docstring
    examples: an uppercase letter after a hyphen ("Saint-Exupéry") was not
    allowed, and initials ("J.K.") could not be the first token.
    """
    if not text or len(text) < 5:
        return False
    # One capitalized word, optionally extended by hyphenated capitalized parts.
    name_part = r'[A-Z][a-zéèêëàâäùûüôöîïç]+(?:-[A-Z][a-zéèêëàâäùûüôöîïç]+)*'
    western_pattern = (
        rf'^(?:{name_part}|(?:[A-Z]\.)+)'                      # first token: name or initials
        rf'(?:\s+(?:de|von|van|du|le|la|the|[A-Z]\.)*'         # optional particles/initials
        rf'\s*{name_part})+$'                                  # at least one more name part
    )
    return bool(re.match(western_pattern, text))
def extract_foreign_author(text: str) -> tuple[str, str]:
    """Extract foreign author name from text, return (title, author).

    Tries four positional regex patterns in order; the first match wins.
    Returns ("", "") when nothing matches or the matched parts are too
    short (title < 3 chars, author < 5 chars).

    Handles patterns like:
    - "HOÀNG TỬ BÉ Antoine de Saint-Exupéry" -> ("HOÀNG TỬ BÉ", "Antoine de Saint-Exupéry")
    - "Antoine de Saint-Exupéry HOÀNG TỬ BÉ" -> ("HOÀNG TỬ BÉ", "Antoine de Saint-Exupéry")
    - "Hoàng Tử Bé ANTOINE DE SAINT EXUPÉRY" -> ("Hoàng Tử Bé", "Antoine De Saint Exupéry")
    """
    if not text:
        return "", ""
    # Define character classes.
    # Full Vietnamese alphabets (every diacritic combination) — used to tell
    # Vietnamese words apart from plain-ASCII Western name tokens.
    VN_UPPER = r'A-ZÀÁẢÃẠĂẰẮẲẴẶÂẦẤẨẪẬĐÈÉẺẼẸÊỀẾỂỄỆÌÍỈĨỊÒÓỎÕỌÔỒỐỔỖỘƠỜỚỞỠỢÙÚỦŨỤƯỪỨỬỮỰỲÝỶỸỴ'
    VN_LOWER = r'a-zàáảãạăằắẳẵặâầấẩẫậđèéẻẽẹêềếểễệìíỉĩịòóỏõọôồốổỗộơờớởỡợùúủũụưừứửữựỳýỷỹỵ'
    # Superset: Vietnamese lowercase plus Western-European accented letters.
    WESTERN_LOWER = r'a-zàáảãạăằắẳẵặâầấẩẫậđèéẻẽẹêềếểễệìíỉĩịòóỏõọôồốổỗộơờớởỡợùúủũụưừứửữựỳýỷỹỵéèêëàâäùûüôöîïç'
    # Compound name part: handles "Saint-Exupéry" = Name[-Name]*
    COMPOUND_NAME = rf'[A-Z][{WESTERN_LOWER}]+(?:-[A-Z][{WESTERN_LOWER}]+)*'
    # Western author name pattern: "Antoine de Saint-Exupéry", "Leo Tolstoy"
    WESTERN_NAME = (
        rf'[A-Z][{WESTERN_LOWER}]+'  # First name
        r'(?:'
        r'\s+(?:de|von|van|du|le|la|di|da|del|dos|das)'  # Particle
        rf'\s+{COMPOUND_NAME}'  # Compound name after particle
        r')?'
        rf'(?:\s+{COMPOUND_NAME})*'  # More name parts
    )
    # Pattern 1: Vietnamese Title case followed by author in ALL CAPS (Western name)
    # e.g., "Hoàng Tử Bé ANTOINE DE SAINT EXUPÉRY Chào mừng..."
    # Must check this FIRST before Pattern 2, otherwise "Hoàng Tử Bé" matches as Western name
    match = re.match(
        rf'^([{VN_UPPER}][{VN_LOWER}]+(?:\s+[{VN_UPPER}][{VN_LOWER}]+)*)\s+'
        r'([A-ZÉÈÊËÀÂÄÙÛÜÔÖÎÏÇ][A-ZÉÈÊËÀÂÄÙÛÜÔÖÎÏÇ\s\-]+?)'
        r'(?=\s+(?:Chào|Nhà|Với|dịch|Dịch|Nguồn|http|[A-Z][a-z])|$)',
        text
    )
    if match:
        title = match.group(1).strip()
        author_caps = match.group(2).strip()
        if len(title) >= 3 and len(author_caps) >= 5:
            # Check it's mostly uppercase (foreign name); ratio is computed
            # over non-space characters to avoid dilution by word gaps.
            upper_ratio = sum(1 for c in author_caps if c.isupper()) / max(len(author_caps.replace(' ', '')), 1)
            if upper_ratio > 0.7:
                # .title() re-cases "ANTOINE DE SAINT EXUPÉRY" -> "Antoine De Saint Exupéry".
                return title, author_caps.title()
    # Pattern 2: Vietnamese ALL CAPS title followed by Western name (Title case)
    # e.g., "HOÀNG TỬ BÉ Antoine de Saint-Exupéry"
    match = re.match(
        rf'^([{VN_UPPER}][{VN_UPPER}\s]+?)\s+({WESTERN_NAME})(?=\s*$|\s+[{VN_UPPER}]?[{VN_LOWER}])',
        text
    )
    if match:
        vn_title = match.group(1).strip()
        author = match.group(2).strip()
        if len(vn_title) >= 3 and len(author) >= 5:
            return vn_title, author
    # Pattern 3: Western name (Title case) followed by Vietnamese ALL CAPS title
    # e.g., "Antoine de Saint-Exupéry HOÀNG TỬ BÉ"
    match = re.match(
        rf'^({WESTERN_NAME})\s+([{VN_UPPER}][{VN_UPPER}\s]+?)(?=\s+[{VN_UPPER}]?[{VN_LOWER}]|\s*$)',
        text
    )
    if match:
        author = match.group(1).strip()
        vn_title = match.group(2).strip()
        if len(vn_title) >= 3 and len(author) >= 5:
            return vn_title, author
    # Pattern 4: English ALL CAPS title followed by Western name (Title case)
    # e.g., "THE LITTLE PRINCE Antoine de Saint-Exupéry"
    match = re.match(
        rf'^([A-Z][A-Z\s]+?)\s+({WESTERN_NAME})(?=\s*$|\s+[{VN_UPPER}]?[{VN_LOWER}])',
        text
    )
    if match:
        title = match.group(1).strip()
        author = match.group(2).strip()
        if len(title) >= 3 and len(author) >= 5:
            return title, author
    # No pattern matched.
    return "", ""
def extract_title_author(text: str) -> tuple[str, str]:
    """Extract title and author from Vietnamese book text.

    Inspects only the first line of the first ~1000 characters, trying a
    cascade of patterns from most specific to least; the first match wins.
    Returns ("", "") for non-book content (technical docs, W3C specs, etc.).

    Handles common Vietnamese book patterns:
    - "TITLE Tác giả: AUTHOR"
    - "TITLE Author: AUTHOR" (English)
    - "TITLE - AUTHOR"
    - "TITLE by AUTHOR"
    - "TITLE\nAUTHOR\nLỜI NÓI ĐẦU..."
    - "HOÀNG TỬ BÉ Antoine de Saint-Exupéry (Bùi Giáng dịch)"
    """
    if not text:
        return "", ""
    # Get first ~1000 chars for analysis
    header = text[:1000].strip()
    lines = header.split("\n")
    # Defensive: str.split always returns at least one element, so this
    # branch is unreachable in practice.
    if not lines:
        return "", ""
    first_line = lines[0].strip()
    title = ""
    author = ""
    # Skip non-book content (technical docs, etc.)
    skip_patterns = [
        r'^(REC-|W3C|DOCTYPE|<!|<\?|http://|https://|Copyright|\*\s*\*\s*\*)',
        r'^(next\s+contents|properties\s+index)',
    ]
    for pattern in skip_patterns:
        if re.match(pattern, first_line, re.IGNORECASE):
            return "", ""
    # Pre-clean the first line: remove translator parentheses and boilerplate.
    first_line_cleaned = clean_title(first_line)
    # Pattern 0: foreign author embedded directly in the title line,
    # e.g. "HOÀNG TỬ BÉ Antoine de Saint-Exupéry (Bùi Giáng dịch)".
    foreign_title, foreign_author = extract_foreign_author(first_line_cleaned)
    if foreign_title and foreign_author:
        return clean_title(foreign_title), foreign_author
    # Pattern 1: "Tác giả:" or "Tác Giả:" marker (Vietnamese "author").
    tac_gia_match = re.search(
        r'^(.+?)\s*[Tt]ác\s*[Gg]iả\s*[:\-]\s*(.+)',
        first_line_cleaned
    )
    if tac_gia_match:
        title = tac_gia_match.group(1).strip()
        author = clean_author(tac_gia_match.group(2).strip())
        return clean_title(title), author
    # Pattern 2: "Author:" marker (English)
    author_match = re.search(
        r'^(.+?)\s*Author\s*[:\-]\s*(.+)',
        first_line_cleaned, re.IGNORECASE
    )
    if author_match:
        title = author_match.group(1).strip()
        author = clean_author(author_match.group(2).strip())
        return clean_title(title), author
    # Pattern 3: "Dịch giả:" marker (translator) - extract title before it.
    # Note: runs against first_line (not first_line_cleaned), since cleaning
    # may already have stripped the translator credit.
    dich_gia_match = re.search(
        r'^(.+?)\s*[Dd]ịch\s*[Gg]iả\s*[:\-]\s*(.+)',
        first_line
    )
    if dich_gia_match:
        title = dich_gia_match.group(1).strip()
        # Try to extract author from title (may contain foreign author)
        t, a = extract_foreign_author(title)
        if t and a:
            return clean_title(t), a
        # Translator name is deliberately NOT used as the author.
        return clean_title(title), ""
    # Pattern 4: "Nguyên tác:" marker - original (untranslated) work
    nguyen_tac_match = re.search(
        r'^(.+?)\s*Nguyên\s*tác\s*[:\-]\s*(.+)',
        first_line_cleaned, re.IGNORECASE
    )
    if nguyen_tac_match:
        title = nguyen_tac_match.group(1).strip()
        # Look for author after original title
        remainder = nguyen_tac_match.group(2).strip()
        # NOTE(review): `remainder` is captured but never used below — the
        # author is taken from the following lines instead. Confirm whether
        # the original-title text was meant to feed the author search.
        # Author might be in next segment
        for line in lines[1:5]:
            line = line.strip()
            if line and len(line) < 60:
                author = clean_author(line)
                break
        return clean_title(title), author
    # Pattern 5: "Nguồn:" marker (source) - title before it, ignore the source
    nguon_match = re.search(r'^(.+?)\s*Nguồn\s*:', first_line_cleaned)
    if nguon_match:
        title = nguon_match.group(1).strip()
        # Author candidate: first following line that is not a content marker.
        for line in lines[1:5]:
            line = line.strip()
            if line and not re.match(r'^(LỜI|MỤC|CHƯƠNG|Phần|\d+|http)', line, re.IGNORECASE):
                author = clean_author(line)
                break
        return clean_title(title), author
    # Pattern 6: Vietnamese author name pattern in title
    # e.g., "CHU DỊCH QUỐC VĂN DIỄN GIẢI Sào Nam Phan Bội Châu LỜI GIỚI THIỆU"
    # (2-5 Title-cased Vietnamese words immediately before a content marker).
    vn_name_match = re.search(
        r'^(.+?)\s+([A-ZÀÁẢÃẠĂẰẮẲẴẶÂẦẤẨẪẬĐÈÉẺẼẸÊỀẾỂỄỆÌÍỈĨỊÒÓỎÕỌÔỒỐỔỖỘƠỜỚỞỠỢÙÚỦŨỤƯỪỨỬỮỰỲÝỶỸỴ][a-zàáảãạăằắẳẵặâầấẩẫậđèéẻẽẹêềếểễệìíỉĩịòóỏõọôồốổỗộơờớởỡợùúủũụưừứửữựỳýỷỹỵ]+(?:\s+[A-ZÀÁẢÃẠĂẰẮẲẴẶÂẦẤẨẪẬĐÈÉẺẼẸÊỀẾỂỄỆÌÍỈĨỊÒÓỎÕỌÔỒỐỔỖỘƠỜỚỞỠỢÙÚỦŨỤƯỪỨỬỮỰỲÝỶỸỴ][a-zàáảãạăằắẳẵặâầấẩẫậđèéẻẽẹêềếểễệìíỉĩịòóỏõọôồốổỗộơờớởỡợùúủũụưừứửữựỳýỷỹỵ]+){1,4})\s+(?:LỜI|MỤC|CHƯƠNG|Phần)',
        first_line_cleaned
    )
    if vn_name_match:
        title = vn_name_match.group(1).strip()
        author = vn_name_match.group(2).strip()
        return clean_title(title), author
    # Pattern 7: " - " separator between title and author
    if " - " in first_line_cleaned:
        parts = first_line_cleaned.split(" - ", 1)
        title = parts[0].strip()
        author = clean_author(parts[1].strip()) if len(parts) > 1 else ""
        return clean_title(title), author
    # Pattern 8: " by " (English)
    if " by " in first_line_cleaned.lower():
        parts = re.split(r'\s+by\s+', first_line_cleaned, flags=re.IGNORECASE)
        title = parts[0].strip()
        author = clean_author(parts[1].strip()) if len(parts) > 1 else ""
        return clean_title(title), author
    # Pattern 9: Content markers in first line - title is everything before
    content_markers = [
        'LỜI NÓI ĐẦU', 'LỜI GIỚI THIỆU', 'LỜI TỰA', 'MỤC LỤC',
        'CHƯƠNG', 'Phần', 'Dẫn nhập', 'Lời mở đầu', 'PHẦN',
        'Tác phẩm', 'Bản quyền', 'Xuất bản', 'Chào mừng các bạn'
    ]
    for marker in content_markers:
        if marker.upper() in first_line_cleaned.upper():
            idx = first_line_cleaned.upper().find(marker.upper())
            # idx > 5 ensures there is a non-trivial title before the marker.
            if idx > 5:
                title = first_line_cleaned[:idx].strip()
                # Try to extract author from this title segment
                t, a = extract_foreign_author(title)
                if t and a:
                    return clean_title(t), a
                return clean_title(title), author
    # Default: first line is title, look for author in next lines
    title = first_line_cleaned
    for line in lines[1:5]:
        line = line.strip()
        if not line:
            continue
        # Skip content markers, URLs, copyright lines, and ebook boilerplate.
        if re.match(r'^(LỜI|MỤC|CHƯƠNG|Phần|\d+|http|www\.|Copyright|©|\*|Chào mừng)', line, re.IGNORECASE):
            continue
        # Lines longer than 80 chars are body text, not an author name.
        if len(line) > 80:
            continue
        author = clean_author(line)
        break
    return clean_title(title), author
def load_vietnamese_books_streaming(source: str, limit: int | None = None) -> list[dict]:
    """Stream the Vietnamese book dataset, extracting (title, author) per record.

    Returns a list of dicts with keys "id" (sequential "vn_NNNNNN"),
    "title", and "author".
    """
    print(f"Loading Vietnamese books from {source} (streaming)...")
    stream = load_dataset(source, split="train", streaming=True)
    records: list[dict] = []
    for idx, sample in enumerate(tqdm(stream, desc="Loading VN books", total=limit)):
        if limit and idx >= limit:
            break
        book_title, book_author = extract_title_author(sample.get("text", ""))
        records.append({
            "id": f"vn_{idx:06d}",
            "title": book_title,
            "author": book_author,
        })
    print(f"Loaded {len(records)} Vietnamese books")
    return records
def find_matches_streaming(
    vn_books: list[dict],
    goodreads_source: str = "BrightData/Goodreads-Books",
    goodreads_limit: int | None = None,
    fuzzy_threshold: int = 85,
) -> list[dict]:
    """Stream through Goodreads and match Vietnamese books (exact, then fuzzy).

    Phase 1 streams the Goodreads dataset once, recording exact matches on
    normalized titles via an O(1) dict lookup while collecting a bounded pool
    of fuzzy candidates. Phase 2 runs rapidfuzz over that pool for books that
    remain unmatched.

    Args:
        vn_books: dicts with "id", "title", "author" keys
            (as produced by load_vietnamese_books_streaming).
        goodreads_source: HuggingFace dataset id for Goodreads books.
        goodreads_limit: optional cap on Goodreads rows scanned.
        fuzzy_threshold: minimum rapidfuzz ratio (0-100) to accept a match.

    Returns:
        One dict per input book: vn_* fields, plus goodreads_* fields with
        match_type/match_score when matched, else goodreads_id=None.
    """
    # Build index of Vietnamese books by normalized title.
    # (The previous vn_titles_set was built but never read — removed.)
    vn_index: dict[str, list[dict]] = {}
    vn_title_lengths: set[int] = set()
    for book in vn_books:
        norm_title = normalize_text(book["title"])
        if norm_title:
            if norm_title not in vn_index:
                vn_index[norm_title] = []
            vn_index[norm_title].append(book)
            vn_title_lengths.add(len(norm_title))
    # Length bounds used to pre-filter fuzzy candidates.
    min_len = min(vn_title_lengths) if vn_title_lengths else 0
    max_len = max(vn_title_lengths) if vn_title_lengths else 0
    print(f"Built index with {len(vn_index)} unique normalized titles")
    # BUG FIX: previously printed a hard-coded "33,030" instead of max_len.
    print(f"Title length range: {min_len} - {max_len} chars")
    # Track matches and candidates for fuzzy matching
    matches: dict[str, dict | None] = {book["id"]: None for book in vn_books}
    fuzzy_candidates: list[tuple[str, dict]] = []  # (norm_title, gr_data)
    print("\nPhase 1: Streaming Goodreads for exact matches...")
    ds = load_dataset(goodreads_source, split="train", streaming=True)
    exact_count = 0
    for i, row in enumerate(tqdm(ds, desc="Exact matching", total=goodreads_limit)):
        if goodreads_limit and i >= goodreads_limit:
            break
        gr_title = row.get("name", "")
        if not gr_title:
            continue
        norm_gr_title = normalize_text(gr_title)
        if not norm_gr_title:
            continue
        gr_data = {
            "goodreads_id": row.get("id", ""),
            "goodreads_url": row.get("url", ""),
            "goodreads_title": gr_title,
            "goodreads_author": row.get("author", ""),
            "goodreads_rating": row.get("star_rating"),
            "goodreads_num_ratings": row.get("num_ratings"),
        }
        # Exact title matches (O(1) lookup); first Goodreads hit for a given
        # book wins, later duplicates are ignored.
        if norm_gr_title in vn_index:
            for vn_book in vn_index[norm_gr_title]:
                vn_id = vn_book["id"]
                if matches[vn_id] is None:
                    exact_count += 1
                    matches[vn_id] = {
                        **gr_data,
                        "match_type": "exact_title",
                        "match_score": 100,
                    }
        # Store potential fuzzy candidates (filter by length + popularity).
        gr_len = len(norm_gr_title)
        num_ratings = gr_data.get("goodreads_num_ratings") or 0
        # Keep titles within 0.6x-1.4x of the VN length range; the first 500k
        # are kept regardless of rating count, after which only titles with
        # >= 10 ratings (popular books) are added.
        if min_len * 0.6 <= gr_len <= max_len * 1.4:
            if num_ratings >= 10 or len(fuzzy_candidates) < 500000:
                fuzzy_candidates.append((norm_gr_title, gr_data))
    print(f"\nExact matches: {exact_count}")
    print(f"Fuzzy candidates: {len(fuzzy_candidates)}")
    # Phase 2: Fuzzy matching for unmatched books
    unmatched_books = [b for b in vn_books if matches[b["id"]] is None and b["title"]]
    if unmatched_books and fuzzy_candidates:
        print(f"\nPhase 2: Fuzzy matching {len(unmatched_books)} unmatched books...")
        print(f"Using {len(fuzzy_candidates)} Goodreads candidates")
        # Build lookup dict; on duplicate titles, the last candidate wins.
        gr_title_to_data = {t: d for t, d in fuzzy_candidates}
        gr_titles = list(gr_title_to_data.keys())
        fuzzy_count = 0
        from rapidfuzz import process
        for vn_book in tqdm(unmatched_books, desc="Fuzzy matching"):
            vn_id = vn_book["id"]
            norm_vn_title = normalize_text(vn_book["title"])
            # Very short titles produce too many spurious fuzzy hits.
            if not norm_vn_title or len(norm_vn_title) < 5:
                continue
            # rapidfuzz's optimized extractOne returns (match, score, index)
            # or None when nothing reaches score_cutoff.
            result = process.extractOne(
                norm_vn_title,
                gr_titles,
                scorer=fuzz.ratio,
                score_cutoff=fuzzy_threshold,
            )
            if result:
                matched_title, score, _ = result
                gr_data = gr_title_to_data[matched_title]
                fuzzy_count += 1
                matches[vn_id] = {
                    **gr_data,
                    "match_type": "fuzzy_title",
                    "match_score": int(score),
                }
        print(f"Fuzzy matches: {fuzzy_count}")
    # Build results: one record per input book, matched or not.
    results = []
    for vn_book in vn_books:
        vn_id = vn_book["id"]
        result = {
            "vn_id": vn_id,
            "vn_title": vn_book["title"],
            "vn_author": vn_book["author"],
        }
        if matches[vn_id]:
            result.update(matches[vn_id])
        else:
            result["goodreads_id"] = None
        results.append(result)
    return results
def main():
    """CLI entry point: load VN books, match against Goodreads, write JSONL."""
    parser = argparse.ArgumentParser(description="Map Vietnamese books to Goodreads")
    parser.add_argument(
        "--output", "-o",
        default="mappings.jsonl",
        help="Output file for mappings (JSONL format)",
    )
    parser.add_argument(
        "--vn-source",
        default="tmnam20/Vietnamese-Book-Corpus",
        help="Vietnamese books dataset on HuggingFace",
    )
    parser.add_argument(
        "--vn-limit",
        type=int,
        default=None,
        help="Limit Vietnamese books to process",
    )
    parser.add_argument(
        "--goodreads-limit",
        type=int,
        default=None,
        help="Limit Goodreads books to scan",
    )
    args = parser.parse_args()
    # Load Vietnamese books
    vn_books = load_vietnamese_books_streaming(args.vn_source, args.vn_limit)
    # Find matches by streaming Goodreads
    results = find_matches_streaming(
        vn_books,
        goodreads_limit=args.goodreads_limit,
    )
    # Save results as JSON Lines (one UTF-8 record per line).
    output_path = Path(args.output)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    with open(output_path, "w", encoding="utf-8") as f:
        for r in results:
            f.write(json.dumps(r, ensure_ascii=False) + "\n")
    matched = sum(1 for r in results if r.get("goodreads_id"))
    total = len(vn_books)
    # BUG FIX: guard against ZeroDivisionError when no books were loaded
    # (e.g. --vn-limit 0 or an empty dataset).
    match_pct = 100 * matched / total if total else 0.0
    print(f"\n{'='*50}")
    print(f"Results saved to: {output_path}")
    print(f"Total Vietnamese books: {total}")
    print(f"Matched to Goodreads: {matched} ({match_pct:.1f}%)")
    print(f"Unmatched: {total - matched}")


if __name__ == "__main__":
    main()