# UVB-v0.1 / scripts/add_genres.py
# Author: rain1024 — "Add add_genres.py script" (commit 0b3d34a, verified)
#!/usr/bin/env python3
"""Add genre information to matched books by fetching from Goodreads.
Usage:
python add_genres.py --input matched_books.jsonl --output matched_books_with_genres.jsonl
"""
import argparse
import json
import re
import time
from pathlib import Path
import requests
from tqdm import tqdm
def fetch_genres(url: str, max_retries: int = 3) -> list[str]:
    """Fetch genre names from a Goodreads book page.

    Scrapes the page HTML with several heuristic regex patterns (Goodreads
    markup varies between page versions), cleans the matched strings, and
    filters out obvious UI chrome that the loose patterns also pick up.

    Args:
        url: Goodreads book page URL. Falsy values short-circuit to [].
        max_retries: Total attempts; retries on HTTP 429 (with increasing
            backoff) and on network errors (2 s pause between attempts).

    Returns:
        Alphabetically sorted list of at most 10 genre names, or [] on
        any failure (non-200 status, exhausted retries, no matches).
    """
    if not url:
        return []
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36'
    }
    # Heuristic patterns for the shapes genre links take in the rendered
    # HTML (e.g. data-testid="genresList", /genres/<slug> hrefs); results
    # from all patterns are unioned.
    genre_patterns = [
        r'href="/genres/([^"]+)"[^>]*>([^<]+)</a>',
        r'class="[^"]*genre[^"]*"[^>]*>([^<]+)<',
        r'data-testid="genresList"[^>]*>.*?<a[^>]*>([^<]+)</a>',
        r'/genres/([a-z\-]+)',
    ]
    # Substrings marking UI text rather than genres. Hoisted out of the
    # per-match loop — the original rebuilt this list for every match.
    invalid_genres = [
        'http', 'www', 'javascript', 'button', 'click',
        'create a free account', 'sign in', 'log in', 'sign up',
        'rate this book', 'want to read', 'currently reading',
        'read', 'kindle', 'nook', 'audible', 'amazon', 'goodreads',
        'more details', 'see all', 'show more', 'view all',
    ]
    for attempt in range(max_retries):
        try:
            response = requests.get(url, headers=headers, timeout=10)
            if response.status_code == 200:
                html = response.text
                genres = set()
                for pattern in genre_patterns:
                    for match in re.findall(pattern, html, re.IGNORECASE | re.DOTALL):
                        if isinstance(match, tuple):
                            # Multi-group pattern: the last group holds the
                            # human-readable genre name.
                            genre = match[-1] if len(match) > 1 else match[0]
                        else:
                            genre = match
                        # Normalize slug-style names ("science-fiction" ->
                        # "Science Fiction").
                        genre = genre.strip().replace('-', ' ').title()
                        if (genre and len(genre) < 50 and
                                not any(x in genre.lower() for x in invalid_genres)):
                            genres.add(genre)
                # BookPageMetadataSection (embedded JSON blob) often lists
                # genres directly.
                metadata_match = re.search(
                    r'BookPageMetadataSection.*?genres.*?(\[.*?\])', html, re.DOTALL)
                if metadata_match:
                    try:
                        genre_list = json.loads(metadata_match.group(1))
                        for g in genre_list:
                            if isinstance(g, str):
                                genres.add(g.strip().title())
                    except ValueError:
                        # Captured bracket span wasn't valid JSON; ignore.
                        # (Was a bare `except:` that also hid real bugs.)
                        pass
                return sorted(genres)[:10]  # cap at 10 genres
            elif response.status_code == 429:  # rate limited: back off
                time.sleep(5 * (attempt + 1))
                continue
            else:
                return []
        except Exception:
            # Network failure etc.: brief pause, then retry or give up.
            if attempt < max_retries - 1:
                time.sleep(2)
                continue
            return []
    return []
def main():
    """CLI entry point: read matched books (JSONL), attach genres, write JSONL.

    Each input line is a JSON object describing a book. Books that carry a
    'goodreads_url' get a 'genres' list fetched from Goodreads (throttled
    by --delay); the rest get an empty list. Prints summary statistics.
    """
    parser = argparse.ArgumentParser(description="Add genres to matched books")
    parser.add_argument("--input", "-i", default="matched_books.jsonl", help="Input file")
    parser.add_argument("--output", "-o", default="matched_books_with_genres.jsonl", help="Output file")
    parser.add_argument("--delay", "-d", type=float, default=1.0, help="Delay between requests (seconds)")
    parser.add_argument("--limit", "-l", type=int, default=None, help="Limit number of books to process")
    args = parser.parse_args()

    # Load matched books: one JSON object per line. Explicit utf-8 so the
    # read is not at the mercy of the locale's default encoding.
    input_path = Path(args.input)
    with open(input_path, 'r', encoding='utf-8') as f:
        books = [json.loads(line) for line in f]
    # `is not None` so an explicit `--limit 0` is honored (the original
    # truthiness test silently ignored 0).
    if args.limit is not None:
        books = books[:args.limit]
    print(f"Processing {len(books)} books...")

    # Fetch genres for each book.
    for book in tqdm(books, desc="Fetching genres"):
        url = book.get('goodreads_url', '')
        if url:
            book['genres'] = fetch_genres(url)
            time.sleep(args.delay)  # be nice to Goodreads
        else:
            book['genres'] = []

    # Save results as JSONL; utf-8 is required because ensure_ascii=False
    # emits raw non-ASCII characters.
    output_path = Path(args.output)
    with open(output_path, 'w', encoding='utf-8') as f:
        for book in books:
            f.write(json.dumps(book, ensure_ascii=False) + '\n')

    # Summary stats.
    books_with_genres = sum(1 for b in books if b.get('genres'))
    total_genres = sum(len(b.get('genres', [])) for b in books)
    print(f"\n{'='*50}")
    print(f"Results saved to: {output_path}")
    print(f"Books processed: {len(books)}")
    print(f"Books with genres: {books_with_genres}")
    print(f"Total genres found: {total_genres}")
    # Guard against ZeroDivisionError when the input file was empty.
    avg = total_genres / len(books) if books else 0.0
    print(f"Average genres per book: {avg:.1f}")


if __name__ == "__main__":
    main()