# UVB-v0.1 / scripts/add_publish_date.py
# Author: rain1024 — "Add add_publish_date.py script" (commit 07afab6, verified)
#!/usr/bin/env python3
"""Add first publish date to matched books by fetching from Goodreads.
Usage:
python add_publish_date.py --input matched_books_with_genres.jsonl --output matched_books_final.jsonl
"""
import argparse
import json
import re
import time
from pathlib import Path
import requests
from tqdm import tqdm
def fetch_first_publish(url: str, max_retries: int = 3) -> str | None:
"""Fetch first publish date from a Goodreads book page."""
if not url:
return None
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36'
}
for attempt in range(max_retries):
try:
response = requests.get(url, headers=headers, timeout=10)
if response.status_code == 200:
html = response.text
# Pattern 1: "First published Month Day, Year" or "First published Year"
patterns = [
r'First published[^<]*?(\w+\s+\d{1,2},?\s+\d{4})',
r'First published[^<]*?(\d{4})',
r'first published[^<]*?(\w+\s+\d{1,2},?\s+\d{4})',
r'first published[^<]*?(\d{4})',
# Pattern for "Published Month Day, Year" (fallback)
r'Published[^<]*?(\w+\s+\d{1,2},?\s+\d{4})',
r'Published[^<]*?(\d{4})',
# Pattern in JSON data
r'"datePublished"\s*:\s*"(\d{4}(?:-\d{2}-\d{2})?)"',
r'"publicationTime"\s*:\s*(\d+)',
]
for pattern in patterns:
match = re.search(pattern, html, re.IGNORECASE)
if match:
date_str = match.group(1)
# Extract year from the date
year_match = re.search(r'(\d{4})', date_str)
if year_match:
return year_match.group(1)
return None
elif response.status_code == 429: # Rate limited
time.sleep(5 * (attempt + 1))
continue
else:
return None
except Exception as e:
if attempt < max_retries - 1:
time.sleep(2)
continue
return None
return None
def main():
    """CLI entry point: read matched-book JSONL, add first-publish years, write JSONL."""
    parser = argparse.ArgumentParser(description="Add first publish date to matched books")
    parser.add_argument("--input", "-i", default="matched_books_with_genres.jsonl", help="Input file")
    parser.add_argument("--output", "-o", default="matched_books_final.jsonl", help="Output file")
    parser.add_argument("--delay", "-d", type=float, default=1.0, help="Delay between requests (seconds)")
    parser.add_argument("--limit", "-l", type=int, default=None, help="Limit number of books to process")
    args = parser.parse_args()

    # Load matched books; explicit UTF-8 so decoding doesn't depend on the
    # platform locale, and skip blank lines so trailing newlines don't crash.
    input_path = Path(args.input)
    with open(input_path, 'r', encoding='utf-8') as f:
        books = [json.loads(line) for line in f if line.strip()]

    # `is not None` (not truthiness) so an explicit `--limit 0` is honored.
    if args.limit is not None:
        books = books[:args.limit]
    print(f"Processing {len(books)} books...")

    # Fetch first publish date for each book that has a Goodreads URL.
    for book in tqdm(books, desc="Fetching publish dates"):
        url = book.get('goodreads_url', '')
        if url:
            book['first_publish'] = fetch_first_publish(url)
            time.sleep(args.delay)  # Be nice to Goodreads
        else:
            book['first_publish'] = None

    # Save results as JSONL (UTF-8, matching ensure_ascii=False output).
    output_path = Path(args.output)
    with open(output_path, 'w', encoding='utf-8') as f:
        for book in books:
            f.write(json.dumps(book, ensure_ascii=False) + '\n')

    # Stats
    books_with_date = sum(1 for b in books if b.get('first_publish'))
    print(f"\n{'='*50}")
    print(f"Results saved to: {output_path}")
    print(f"Books processed: {len(books)}")
    print(f"Books with first_publish: {books_with_date}")


if __name__ == "__main__":
    main()