"""
01_scrape.py — Fetch the RocketReviews.com product index and scrape each
detail page, saving structured JSON to source/products/.

Output
------
source/products/index.jsonl                 one record per product (raw index fields)
source/products/detail/{shard}/{id}.json    full parsed detail per product
                                            (shard = id // 1000, zero-padded)

Usage
-----
python scripts/products/01_scrape.py
python scripts/products/01_scrape.py --delay 2.0 --limit 10
python scripts/products/01_scrape.py --force    # re-scrape existing files
"""
|
|
from __future__ import annotations

import argparse
import json
import logging
import re
import sys
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

import requests
from bs4 import BeautifulSoup, Tag
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
|
|
BASE_URL = "https://www.rocketreviews.com"
INDEX_URL = f"{BASE_URL}/data/products/products.php?search=&type="
USER_AGENT = "RocketReviews-Dataset/1.0"
DEFAULT_DELAY = 1.0
|
|
ROOT = Path(__file__).parent.parent.parent
SOURCE_DIR = ROOT / "source" / "products"
DETAIL_DIR = SOURCE_DIR / "detail"
|
|
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)],
)
log = logging.getLogger(__name__)
|
|
def _build_session() -> requests.Session:
    s = requests.Session()
    s.headers["User-Agent"] = USER_AGENT
    retry = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    s.mount("https://", HTTPAdapter(max_retries=retry))
    s.mount("http://", HTTPAdapter(max_retries=retry))
    return s


class RateLimiter:
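    """Enforce a minimum interval between consecutive requests."""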
    def __init__(self, delay: float) -> None:
        self.delay = delay
        self._last: float = 0.0

    def wait(self) -> None:
        elapsed = time.monotonic() - self._last
        if elapsed < self.delay:
            time.sleep(self.delay - elapsed)
        self._last = time.monotonic()
|
|
|
|
def _re_first(pattern: str, text: str, group: int = 1) -> Optional[str]:
    m = re.search(pattern, text)
    return m.group(group) if m else None
|
|
|
|
def _slug_to_label(slug: str) -> str:
    """Convert a hyphenated slug to a title-cased label."""
    return slug.replace("-", " ").title()
|
|
|
|
def _kit_index_int(field: str, html: str) -> Optional[int]:
    """
    Extract an integer value from a /kit-index-{field}-{value}.html href.
    Used for fields stored as integers (e.g. skill-level).
    """
    val = _re_first(rf"/kit-index-{field}-(\d+)\.html", html)
    return int(val) if val else None
|
|
|
|
def _kit_index_scaled(field: str, html: str, scale: int = 10000) -> Optional[float]:
    """
    Extract a scaled integer from a /kit-index-{field}-{value}.html href
    and divide by scale. Used for diameter, length, weight (4 decimal places).
    """
    val = _re_first(rf"/kit-index-{field}-(\d+)\.html", html)
    return int(val) / scale if val else None
|
|
|
|
def _kit_index_slug(field: str, html: str) -> Optional[str]:
    """
    Extract and humanize a slug value from a /kit-index-{field}-{slug}.html href.
    Used for power class, recovery type, status.
    """
    val = _re_first(rf"/kit-index-{field}-([a-z][a-z-]*)\.html", html)
    return _slug_to_label(val) if val else None
|
|
|
|
def _parse_manufacturer(html: str, soup: BeautifulSoup) -> dict:
    """Extract manufacturer name and URLs from the page."""
    mfr_canonical = _re_first(r'href="(/[a-z][a-z0-9-]+-\d+\.html)"', html)
    url = f"{BASE_URL}{mfr_canonical}" if mfr_canonical else None

    mfr_alias = _re_first(r"(/kit-index-manufacturer-[^\"']+\.html)", html)
    url_alias = f"{BASE_URL}{mfr_alias}" if mfr_alias else None

    mfr_link = soup.find("a", href=re.compile(r"/kit-index-manufacturer-"))
    name = mfr_link.get_text(strip=True) if mfr_link else None
    return {
        "name": name,
        "url": url,
        "url_alias": url_alias,
    }
|
|
|
|
def _parse_designer(html: str) -> Optional[dict]:
    """
    Extract designer name and URL from /kit-index-designer-{name}-{id}.html.
    Returns None if no designer is present.
    """
    m = re.search(r"(/kit-index-designer-([a-z][a-z-]*)-\d+\.html)", html)
    if not m:
        return None
    return {
        "name": _slug_to_label(m.group(2)),
        "url": f"{BASE_URL}{m.group(1)}",
    }
|
|
|
|
def _parse_styles(html: str) -> list[str]:
    """Extract all style labels from /kit-index-style-{name}-{id}.html hrefs."""
    return [
        _slug_to_label(m)
        for m in re.findall(r"/kit-index-style-([a-z][a-z-]*)-\d+\.html", html)
    ]
|
|
|
|
def _parse_recommended_motors(soup: BeautifulSoup) -> list[str]:
    """
    Find the 'Recommended Motors' label and extract the motor designations
    that follow it as comma-separated text.
    """
    strong = soup.find("strong", string=re.compile(r"Recommended Motors?", re.I))
    if not strong:
        return []
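    # Collect text from siblings until the next <strong> label; designations
    # may be separated by commas and/or whitespace.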
    text = ""
    for sib in strong.next_siblings:
        if isinstance(sib, Tag) and sib.name == "strong":
            break
        text += sib.get_text() if isinstance(sib, Tag) else str(sib)
    motors = [m.strip() for m in re.split(r"[,\s]+", text.strip()) if m.strip()]
    return motors
|
|
|
|
def _parse_cp(html: str) -> Optional[dict]:
    """
    Extract CP (center of pressure) location and method if present.
    CP location href: /kit-index-cp-{value}.html (value = inches * 10000)
    CP method href:   /kit-index-cpmethod-{slug}.html
    Direction (Front/Rear) is extracted from the surrounding text.
    """
    location_raw = _re_first(r"/kit-index-cp-(\d+)\.html", html)
    if not location_raw:
        return None

    location_in = int(location_raw) / 10000

    # Default to "Front" when the page does not state a direction.
    direction = "Front"
    m = re.search(r"(\d+\.\d+)\s+inches\s+from\s+(Front|Rear)", html, re.I)
    if m:
        direction = m.group(2).title()

    method_slug = _re_first(r"/kit-index-cpmethod-([a-z][a-z-]*)\.html", html)
    method = _slug_to_label(method_slug) if method_slug else None

    return {
        "location_in": location_in,
        "location_from": direction,
        "method": method,
    }
|
|
|
|
def _parse_specs(html: str, soup: BeautifulSoup) -> Optional[dict]:
    """
    Parse kit-specific structured specs from /kit-index-{field}-{value}.html
    link patterns. Returns None for non-kit product types that lack specs.
    """
    diameter = _kit_index_scaled("diameter", html)
    length = _kit_index_scaled("length", html)
    weight = _kit_index_scaled("weight", html)
    motor_size = _kit_index_int("motor-size", html)
    skill_level = _kit_index_int("skill-level", html)
    power_class = _kit_index_slug("power", html)
    recovery = _kit_index_slug("recovery", html)
    status_slug = _re_first(r"/kit-index-status-([a-z][a-z-]*)\.html", html)
    status = _slug_to_label(status_slug) if status_slug else None
    styles = _parse_styles(html)
    recommended_motors = _parse_recommended_motors(soup)

    # No core kit fields found: treat as a non-kit product with no specs block.
    if not any([diameter, length, weight, motor_size, skill_level, power_class]):
        return None

    return {
        "diameter_in": diameter,
        "length_in": length,
        "weight_oz": weight,
        "motor_size_mm": motor_size,
        "power_class": power_class,
        "skill_level": skill_level,
        "style": styles,
        "recovery": recovery,
        "status": status,
        "recommended_motors": recommended_motors,
    }
|
|
|
|
def _parse_detail(html: str, index_rec: dict) -> dict:
    soup = BeautifulSoup(html, "lxml")
    product_id = int(index_rec["id"])

    # Canonical numeric product URL.
    url = f"{BASE_URL}/product-{product_id}.html"

    # Human-readable alias path carried by the index record, if any.
    alias_path = index_rec.get("url", "")
    url_alias = f"{BASE_URL}{alias_path}" if alias_path else None

    return {
        "id": product_id,
        "url": url,
        "url_alias": url_alias,
        "name": index_rec.get("name"),
        "title": index_rec.get("title"),
        "type": index_rec.get("type"),
        "model": index_rec.get("model") or None,
        "years": index_rec.get("years") or None,
        "manufacturer": _parse_manufacturer(html, soup),
        "designer": _parse_designer(html),
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "specs": _parse_specs(html, soup),
        "cp": _parse_cp(html),
    }
|
|
|
|
def fetch_index(session: requests.Session) -> list[dict]:
    log.info("Fetching product index from %s", INDEX_URL)
    resp = session.get(INDEX_URL, timeout=30)
    resp.raise_for_status()
    # The index endpoint returns JSON with a top-level "records" list.
    records = resp.json().get("records", [])
    log.info("Index returned %d records.", len(records))
    return records
|
|
|
|
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    product_id = int(index_rec["id"])
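    # Detail files are sharded 1,000 per directory: e.g. id 12345 lands in
    # detail/012/012345.json.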
    shard = f"{product_id // 1000:03d}"
    shard_dir = DETAIL_DIR / shard
    dest = shard_dir / f"{product_id:06d}.json"

    if dest.exists() and not force:
        log.debug("Already scraped %s, skipping.", product_id)
        return None

    url = f"{BASE_URL}/product-{product_id}.html"
    rate.wait()

    try:
        resp = session.get(url, timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        log.warning("Failed to fetch product %s: %s", product_id, exc)
        return None

    return _parse_detail(resp.text, index_rec)
|
|
|
|
def main() -> None:
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com products.")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many detail pages (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape products that already have a saved detail file",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    records = fetch_index(session)

    # Write the raw index as JSON Lines, one record per line, stamped with
    # the scrape time.
    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in records:
            f.write(json.dumps({**rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d index records to %s", len(records), index_path)

    if args.limit is not None:
        records = records[: args.limit]

    ok = skipped = failed = 0
    total = len(records)

    for i, rec in enumerate(records, 1):
        result = scrape_detail(session, rate, rec, force=args.force)

        if result is None:
            # Either already scraped (and not --force) or the fetch failed;
            # fetch failures are logged inside scrape_detail.
            skipped += 1
            continue

        product_id = int(rec["id"])
        shard = f"{product_id // 1000:03d}"
        shard_dir = DETAIL_DIR / shard
        shard_dir.mkdir(parents=True, exist_ok=True)
        dest = shard_dir / f"{product_id:06d}.json"

        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)
|
|
|
|
if __name__ == "__main__":
    main()
|
|