---
license: mit
---
|
|
|
|
|
## FalconNet/FunPay-Minecraft-Lots-Mini-6k

- Prices are in Rubles
|
|
|
|
|
|
|
|
### Script used to create this:
|
|
```python
|
|
from __future__ import annotations |
|
|
|
|
|
import argparse |
|
|
import asyncio |
|
|
import csv |
|
|
import json |
|
|
import re |
|
|
from dataclasses import dataclass, asdict |
|
|
from pathlib import Path |
|
|
from typing import List, Optional |
|
|
|
|
|
from bs4 import BeautifulSoup |
|
|
from playwright.async_api import async_playwright, TimeoutError as PlaywrightTimeout |
|
|
|
|
|
|
|
|
@dataclass |
|
|
class Lot: |
|
|
"""Represents a single offer on FunPay.""" |
|
|
|
|
|
lot_id: int |
|
|
server: str |
|
|
title: str |
|
|
category: str |
|
|
seller: str |
|
|
reviews: int |
|
|
price: float |
|
|
currency: str |
|
|
link: str |
|
|
|
|
|
|
|
|
class FunpayParser: |
|
|
"""High-level API for scraping the public FunPay catalog.""" |
|
|
|
|
|
BASE_URL = "https://funpay.com" |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
language: str = "ru", |
|
|
currency: str = "RUB", |
|
|
headless: bool = False, |
|
|
timeout: int = 10_000, |
|
|
) -> None: |
|
|
self.language = language |
|
|
self.currency = currency |
|
|
self.headless = headless |
|
|
self.timeout = timeout |
|
|
|
|
|
# ─────────────────────────── internal helpers ──────────────────────────── # |
|
|
|
|
|
async def _fetch_html(self, url: str) -> str: |
|
|
"""Render *url* with Playwright and return the final HTML source.""" |
|
|
async with async_playwright() as p: |
|
|
browser = await p.chromium.launch(headless=self.headless) |
|
|
context = await browser.new_context( |
|
|
locale=self.language, |
|
|
extra_http_headers={ |
|
|
"Accept-Language": self.language, |
|
|
"User-Agent": ( |
|
|
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) " |
|
|
"AppleWebKit/537.36 (KHTML, like Gecko) " |
|
|
"Chrome/124.0.0.0 Safari/537.36" |
|
|
), |
|
|
}, |
|
|
) |
|
|
# Pre-set currency so the prices are parsed consistently |
|
|
await context.add_cookies( |
|
|
[ |
|
|
{ |
|
|
"name": "cy", |
|
|
"value": self.currency, |
|
|
"domain": "funpay.com", |
|
|
"path": "/", |
|
|
} |
|
|
] |
|
|
) |
|
|
|
|
|
page = await context.new_page() |
|
|
try: |
|
|
# Navigate and wait for the full page load, including JS content |
|
|
await page.goto(url, wait_until="load", timeout=self.timeout) |
|
|
|
|
|
# Явное ожидание появления важного элемента (например, списка лотов) |
|
|
await page.wait_for_selector(".tc-item", timeout=10_000) # Ждем появления первого лота |
|
|
|
|
|
except PlaywrightTimeout: |
|
|
await browser.close() |
|
|
raise RuntimeError(f"Page load timeout: {url}") |
|
|
|
|
|
# Accept cookies banner if present |
|
|
try: |
|
|
await page.locator("button:has-text('Agree')").click(timeout=2_000) |
|
|
except PlaywrightTimeout: |
|
|
pass # banner not shown |
|
|
|
|
|
html = await page.content() |
|
|
await browser.close() |
|
|
return html |
|
|
|
|
|
def _parse_html(self, html: str) -> List[Lot]: |
|
|
"""Extract structured data from the rendered HTML using BeautifulSoup.""" |
|
|
soup = BeautifulSoup(html, "lxml") |
|
|
lots: List[Lot] = [] |
|
|
|
|
|
for anchor in soup.select("a.tc-item"): |
|
|
link = self.BASE_URL + anchor.get("href", "") |
|
|
|
|
|
# Server / sub-category (can be empty) |
|
|
server_el = anchor.select_one(".tc-server") |
|
|
server = server_el.get_text(strip=True) if server_el else "" |
|
|
|
|
|
# Title / description |
|
|
title_el = anchor.select_one(".tc-desc") or anchor.select_one(".tc-title") |
|
|
title = title_el.get_text(strip=True) if title_el else "" |
|
|
|
|
|
# Small badge with an additional category mark |
|
|
badge_el = anchor.select_one(".badge") |
|
|
category = badge_el.get_text(strip=True) if badge_el else "" |
|
|
|
|
|
# Seller block |
|
|
seller_el = anchor.select_one(".media-user-name") |
|
|
seller = seller_el.get_text(strip=True) if seller_el else "" |
|
|
# Reviews are rendered as “(1234)” |
|
|
rev_el = anchor.select_one(".rating-count") |
|
|
reviews = int(rev_el.get_text(strip=True).strip("()")) if rev_el else 0 |
|
|
|
|
|
# Price block |
|
|
price_el = anchor.select_one(".tc-price") |
|
|
price_val, price_curr = 0.0, self.currency |
|
|
if price_el: |
|
|
match = re.match(r"([\d\.,]+)\s*([^\d\s]+)", price_el.get_text(strip=True)) |
|
|
if match: |
|
|
raw, price_curr = match.groups() |
|
|
price_val = float(raw.replace(",", ".")) |
|
|
|
|
|
# Lot ID is available only in the lot link |
|
|
match_id = re.search(r"id=(\d+)", link) |
|
|
lot_id = int(match_id.group(1)) if match_id else 0 |
|
|
|
|
|
lots.append( |
|
|
Lot( |
|
|
lot_id=lot_id, |
|
|
server=server, |
|
|
title=title, |
|
|
category=category, |
|
|
seller=seller, |
|
|
reviews=reviews, |
|
|
price=price_val, |
|
|
currency=price_curr, |
|
|
link=link, |
|
|
) |
|
|
) |
|
|
return lots |
|
|
|
|
|
async def _collect_lots(self, section_id: int, pages: int) -> List[Lot]: |
|
|
"""Coroutine that loads *pages* pages for *section_id* sequentially.""" |
|
|
result: List[Lot] = [] |
|
|
for idx in range(1, pages + 1): |
|
|
url = f"{self.BASE_URL}/{self.language}/lots/{section_id}/?page={idx}" |
|
|
html = await self._fetch_html(url) |
|
|
page_lots = self._parse_html(html) |
|
|
if not page_lots: |
|
|
break # reached last page |
|
|
result.extend(page_lots) |
|
|
return result |
|
|
|
|
|
# ───────────────────────────── public API ──────────────────────────────── # |
|
|
|
|
|
def parse(self, section_id: int, pages: int = 1) -> List[Lot]: |
|
|
"""Synchronously scrape *pages* from a lots section.""" |
|
|
return asyncio.run(self._collect_lots(section_id, pages)) |
|
|
|
|
|
|
|
|
# I/O helpers remain unchanged |
|
|
|
|
|
def save_json(lots: List[Lot], path: Path) -> None: |
|
|
path.write_text(json.dumps([asdict(x) for x in lots], ensure_ascii=False, indent=2), "utf-8") |
|
|
|
|
|
def save_csv(lots: List[Lot], path: Path) -> None: |
|
|
if not lots: |
|
|
return |
|
|
with path.open("w", newline="", encoding="utf-8") as f: |
|
|
writer = csv.DictWriter(f, fieldnames=asdict(lots[0]).keys()) |
|
|
writer.writeheader() |
|
|
for row in lots: |
|
|
writer.writerow(asdict(row)) |
|
|
|
|
|
|
|
|
# CLI entry unchanged |
|
|
if __name__ == "__main__": |
|
|
def _cli() -> None: |
|
|
parser = argparse.ArgumentParser(description="Scrape FunPay lots into JSON/CSV files") |
|
|
parser.add_argument("section", type=int, help="Section numeric ID (e.g. 223 for Minecraft Services)") |
|
|
parser.add_argument("--pages", type=int, default=1, help="Number of paginated screens to scan") |
|
|
parser.add_argument("--out", type=Path, help="Output file (.json or .csv)") |
|
|
parser.add_argument("--headed", action="store_true", help="Run browser in headed mode (for debugging)") |
|
|
|
|
|
args = parser.parse_args() |
|
|
|
|
|
funpay = FunpayParser(headless=not args.headed) |
|
|
lots = funpay.parse(args.section, pages=args.pages) |
|
|
print(f"Collected {len(lots)} lots from section #{args.section}") |
|
|
|
|
|
if args.out: |
|
|
if args.out.suffix == ".json": |
|
|
save_json(lots, args.out) |
|
|
elif args.out.suffix == ".csv": |
|
|
save_csv(lots, args.out) |
|
|
else: |
|
|
raise SystemExit("Supported output formats: .json / .csv") |
|
|
print(f"Saved to {args.out.resolve()}") |
|
|
else: |
|
|
from pprint import pprint |
|
|
pprint(lots[:10]) |
|
|
```