#!/usr/bin/env python3 """ OpenTransformers Video Crawler v1.0 Grabby hands for video data - uses yt-dlp for broad site support. Grabs videos + metadata (title, description, duration, tags) Supports YouTube, Vimeo, Twitter, Reddit, and 1000+ sites via yt-dlp. Usage: python video_crawler.py --seeds video_seeds.txt --output /workspace/video --max-gb 50 python video_crawler.py --seed "https://www.youtube.com/watch?v=dQw4w9WgXcQ" --max-duration 300 """ import asyncio import subprocess import argparse import hashlib import json import os import re import time import random from dataclasses import dataclass from datetime import datetime from pathlib import Path from typing import Optional, Set, Dict, List from urllib.parse import urljoin, urlparse import aiohttp # Video URL patterns yt-dlp can handle VIDEO_PATTERNS = [ r'youtube\.com/watch\?v=', r'youtu\.be/', r'vimeo\.com/\d+', r'twitter\.com/.*/status/', r'x\.com/.*/status/', r'reddit\.com/r/.*/comments/', r'tiktok\.com/', r'instagram\.com/p/', r'instagram\.com/reel/', r'facebook\.com/.*/videos/', r'twitch\.tv/', r'dailymotion\.com/video/', r'streamable\.com/', r'v\.redd\.it/', r'gfycat\.com/', r'imgur\.com/.*\.(mp4|gifv)', ] VIDEO_PATTERN_RE = re.compile('|'.join(VIDEO_PATTERNS), re.I) def is_video_url(url: str) -> bool: """Check if URL is a video we can download""" return bool(VIDEO_PATTERN_RE.search(url)) def extract_video_urls(html: str, base_url: str) -> List[str]: """Extract video URLs from HTML""" urls = set() # Find all links for match in re.finditer(r'href=["\']([^"\']+)["\']', html, re.I): href = match.group(1) try: absolute = urljoin(base_url, href) if is_video_url(absolute): urls.add(absolute) except: pass # Find embedded URLs in text/scripts for match in re.finditer(r'https?://[^\s"\'<>]+', html): url = match.group(0) if is_video_url(url): urls.add(url.rstrip('.,;:')) return list(urls) def extract_page_links(html: str, base_url: str) -> Set[str]: """Extract page links for crawling""" links = set() for match in re.finditer(r'href=["\']([^"\']+)["\']', html, re.I): href = match.group(1) if href.startswith(('#', 'javascript:', 'mailto:')): continue try: absolute = urljoin(base_url, href) if absolute.startswith(('http://', 'https://')): absolute = absolute.split('#')[0] if len(absolute) < 500: links.add(absolute) except: pass return links class VideoCrawler: USER_AGENTS = [ 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/120.0.0.0 Safari/537.36', ] def __init__( self, output_dir: str = 'video_data', max_workers: int = 5, # Lower for video - heavy downloads max_pages: int = 10000, max_bytes: int = 50 * 1024**3, # 50GB default max_duration: int = 300, # 5 min max per video max_height: int = 720, # 720p max metadata_only: bool = False, # Just grab metadata, no video ): self.output_dir = Path(output_dir) self.output_dir.mkdir(parents=True, exist_ok=True) (self.output_dir / 'videos').mkdir(exist_ok=True) self.max_workers = max_workers self.max_pages = max_pages self.max_bytes = max_bytes self.max_duration = max_duration self.max_height = max_height self.metadata_only = metadata_only self.page_queue: asyncio.Queue = asyncio.Queue() self.video_queue: asyncio.Queue = asyncio.Queue() self.seen_pages: Set[str] = set() self.seen_videos: Set[str] = set() self.running = True self.stats = {'pages': 0, 'videos': 0, 'bytes': 0, 'skipped': 0, 'errors': 0} self.start_time = time.time() self.output_file = None self.output_lock = asyncio.Lock() def _report(self) -> str: elapsed = time.time() - self.start_time mb = self.stats['bytes'] 

    def _report(self) -> str:
        elapsed = time.time() - self.start_time
        mb = self.stats['bytes'] / (1024**2)
        return (f"Pages: {self.stats['pages']} | Videos: {self.stats['videos']} | "
                f"{mb:.1f} MB | Skip: {self.stats['skipped']} | "
                f"Err: {self.stats['errors']} | {elapsed:.0f}s")

    async def _fetch_page(self, session: aiohttp.ClientSession, url: str):
        try:
            headers = {'User-Agent': random.choice(self.USER_AGENTS)}
            async with session.get(
                url,
                headers=headers,
                timeout=aiohttp.ClientTimeout(total=30),
                ssl=False,
                allow_redirects=True,
            ) as resp:
                ct = resp.headers.get('content-type', '').lower()
                if 'text/html' not in ct:
                    return None, set()
                html = await resp.text(errors='ignore')
                links = extract_page_links(html, url)
                return html, links
        except (aiohttp.ClientError, asyncio.TimeoutError, UnicodeError):
            return None, set()

    def _download_video(self, url: str, source_page: str) -> Optional[Dict]:
        """Download video using yt-dlp."""
        video_hash = hashlib.md5(url.encode()).hexdigest()[:16]
        output_template = str(self.output_dir / 'videos' / f'{video_hash}.%(ext)s')

        # Build yt-dlp command
        cmd = [
            'yt-dlp',
            '--no-playlist',
            '--max-filesize', '500M',
            '--socket-timeout', '30',
            '-f', f'bestvideo[height<={self.max_height}]+bestaudio/best[height<={self.max_height}]',
            '--merge-output-format', 'mp4',
            '-o', output_template,
            '--print-json',
            '--no-progress',
        ]
        if self.max_duration:
            cmd.extend(['--match-filter', f'duration<={self.max_duration}'])
        if self.metadata_only:
            cmd.extend(['--skip-download', '--write-info-json'])
        cmd.append(url)

        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=300,  # 5 min timeout per download
            )
            if result.returncode != 0:
                self.stats['errors'] += 1
                return None

            # yt-dlp prints one JSON object per video; take the last stdout line
            info = json.loads(result.stdout.strip().split('\n')[-1])

            # Find the downloaded file (extension depends on the source format)
            video_file = None
            if not self.metadata_only:
                for f in (self.output_dir / 'videos').glob(f'{video_hash}.*'):
                    if f.suffix in ('.mp4', '.mkv', '.webm', '.mov'):
                        video_file = f.name
                        break

            file_size = 0
            if video_file:
                file_size = (self.output_dir / 'videos' / video_file).stat().st_size

            return {
                'url': url,
                'source_page': source_page,
                'timestamp': datetime.utcnow().isoformat(),
                'title': info.get('title', ''),
                'description': (info.get('description') or '')[:1000],
                'duration': info.get('duration', 0),
                'width': info.get('width', 0),
                'height': info.get('height', 0),
                'fps': info.get('fps', 0),
                'view_count': info.get('view_count', 0),
                'like_count': info.get('like_count', 0),
                'uploader': info.get('uploader', ''),
                'upload_date': info.get('upload_date', ''),
                'tags': (info.get('tags') or [])[:20],
                'categories': info.get('categories', []),
                'extractor': info.get('extractor', ''),
                'format': info.get('format', ''),
                'size_bytes': file_size,
                'hash': video_hash,
                'file': video_file,
            }
        except subprocess.TimeoutExpired:
            self.stats['errors'] += 1
            return None
        except Exception:
            self.stats['errors'] += 1
            return None

    async def _page_worker(self, session: aiohttp.ClientSession, worker_id: int):
        """Crawl pages and find video URLs."""
        while self.running:
            try:
                page_url = await asyncio.wait_for(self.page_queue.get(), timeout=10.0)
            except asyncio.TimeoutError:
                if self.page_queue.empty():
                    break
                continue

            if self.stats['bytes'] >= self.max_bytes or self.stats['pages'] >= self.max_pages:
                self.running = False
                self.page_queue.task_done()
                break

            html, links = await self._fetch_page(session, page_url)
            if html:
                self.stats['pages'] += 1

                # Extract video URLs
                video_urls = extract_video_urls(html, page_url)
                for video_url in video_urls:
                    if video_url not in self.seen_videos:
                        self.seen_videos.add(video_url)
                        await self.video_queue.put((video_url, page_url))

                # Queue new pages
                for link in links:
                    if link not in self.seen_pages and self.stats['pages'] < self.max_pages:
                        self.seen_pages.add(link)
                        await self.page_queue.put(link)

                if self.stats['pages'] % 20 == 0:
                    print(f"[{datetime.now().strftime('%H:%M:%S')}] {self._report()} "
                          f"| Queue: {self.video_queue.qsize()}")

            self.page_queue.task_done()
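
    # Optional politeness sketch, not wired into the workers above: a per-domain
    # delay keeps the crawler from hammering a single host. The `_last_hit` dict
    # and the `min_gap` default are assumptions, not part of the original design;
    # a caller would await this before each fetch of `url`.
    async def _domain_delay(self, url: str, min_gap: float = 1.0) -> None:
        """Sleep until at least `min_gap` seconds have passed since the last hit on this host."""
        host = urlparse(url).netloc
        if not hasattr(self, '_last_hit'):
            self._last_hit: Dict[str, float] = {}
        wait = self._last_hit.get(host, 0.0) + min_gap - time.time()
        if wait > 0:
            await asyncio.sleep(wait)
        self._last_hit[host] = time.time()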

    async def _video_worker(self, worker_id: int):
        """Download videos from queue."""
        loop = asyncio.get_running_loop()
        while self.running:
            try:
                video_url, source_page = await asyncio.wait_for(self.video_queue.get(), timeout=10.0)
            except asyncio.TimeoutError:
                if self.video_queue.empty() and self.page_queue.empty():
                    break
                continue

            if self.stats['bytes'] >= self.max_bytes:
                self.running = False
                self.video_queue.task_done()
                break

            # Run yt-dlp in thread pool (blocking subprocess call)
            result = await loop.run_in_executor(
                None, self._download_video, video_url, source_page
            )

            if result:
                self.stats['videos'] += 1
                self.stats['bytes'] += result.get('size_bytes', 0)
                async with self.output_lock:
                    self.output_file.write(json.dumps(result) + '\n')
                print(f" ✓ {result['title'][:50]}... ({result.get('duration', 0)}s)")
            else:
                self.stats['skipped'] += 1

            self.video_queue.task_done()

    async def crawl(self, seeds: List[str]):
        print("OpenTransformers Video Crawler starting")
        print(f"Seeds: {len(seeds)} | Page workers: {self.max_workers} | Video workers: 3")
        print(f"Target: {self.max_pages} pages / {self.max_bytes / 1024**3:.1f} GB / "
              f"{self.max_duration}s max")
        print("-" * 60)

        # Separate direct video URLs from page URLs
        for seed in seeds:
            if is_video_url(seed):
                self.seen_videos.add(seed)
                await self.video_queue.put((seed, 'seed'))
            else:
                self.seen_pages.add(seed)
                await self.page_queue.put(seed)

        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        output_path = self.output_dir / f'video_{timestamp}.jsonl'
        self.output_file = open(output_path, 'w', buffering=1)

        connector = aiohttp.TCPConnector(limit=self.max_workers, ssl=False)
        async with aiohttp.ClientSession(connector=connector) as session:
            # Start page crawlers
            page_workers = [
                asyncio.create_task(self._page_worker(session, i))
                for i in range(self.max_workers)
            ]
            # Start video downloaders (fewer, heavier)
            video_workers = [
                asyncio.create_task(self._video_worker(i))
                for i in range(3)
            ]
            await asyncio.gather(*page_workers, *video_workers)

        self.output_file.close()

        print("-" * 60)
        print(f"DONE! {self._report()}")
        print(f"Metadata: {output_path}")
        video_dir = self.output_dir / 'videos'
        files = (list(video_dir.glob('*.mp4'))
                 + list(video_dir.glob('*.webm'))
                 + list(video_dir.glob('*.mkv')))
        total_size = sum(f.stat().st_size for f in files) / (1024**3)
        print(f"Videos: {len(files)} files, {total_size:.2f} GB in {video_dir}")
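
# Standalone sketch of the metadata-only idea: yt-dlp's -J/--dump-single-json
# flag emits one JSON document without downloading, which is handy for probing
# a URL before committing bandwidth. This helper is illustrative and is not
# used by the crawler above.
def probe_video(url: str) -> Optional[Dict]:
    """Fetch metadata for a single URL via yt-dlp without downloading the video."""
    try:
        result = subprocess.run(
            ['yt-dlp', '-J', '--no-playlist', url],
            capture_output=True, text=True, timeout=60,
        )
        if result.returncode != 0:
            return None
        return json.loads(result.stdout)
    except (subprocess.TimeoutExpired, json.JSONDecodeError):
        return None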
{self._report()}") print(f"Metadata: {output_path}") video_dir = self.output_dir / 'videos' files = list(video_dir.glob('*.mp4')) + list(video_dir.glob('*.webm')) + list(video_dir.glob('*.mkv')) total_size = sum(f.stat().st_size for f in files) / (1024**3) print(f"Videos: {len(files)} files, {total_size:.2f} GB in {video_dir}") def main(): p = argparse.ArgumentParser(description='OpenTransformers Video Crawler') p.add_argument('--seeds', type=str, help='File with seed URLs') p.add_argument('--seed', type=str, action='append', help='Single seed URL') p.add_argument('--output', type=str, default='video_data') p.add_argument('--workers', type=int, default=5) p.add_argument('--max-pages', type=int, default=10000) p.add_argument('--max-gb', type=float, default=50.0) p.add_argument('--max-duration', type=int, default=300, help='Max video duration (seconds)') p.add_argument('--max-height', type=int, default=720, help='Max video height (pixels)') p.add_argument('--metadata-only', action='store_true', help='Only grab metadata, skip video download') args = p.parse_args() seeds = [] if args.seeds: with open(args.seeds) as f: seeds.extend(line.strip() for line in f if line.strip()) if args.seed: seeds.extend(args.seed) if not seeds: print("ERROR: Provide --seeds file or --seed URLs") return crawler = VideoCrawler( output_dir=args.output, max_workers=args.workers, max_pages=args.max_pages, max_bytes=int(args.max_gb * 1024**3), max_duration=args.max_duration, max_height=args.max_height, metadata_only=args.metadata_only, ) asyncio.run(crawler.crawl(seeds)) if __name__ == '__main__': main()