#!/usr/bin/env python3
"""
Download test images for SAM3 inference testing

Uses free, high-quality images from Unsplash and Pixabay.
"""

import requests
from pathlib import Path
import time

# Configuration: all downloaded images land here. The directory is created
# at import time so the module-level side effect matches the original script.
OUTPUT_DIR = Path("assets/test_images")
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

# Free test images from Unsplash (free to use, no attribution required)
# These are direct links to specific images showing potholes, road cracks, and roads
UNSPLASH_IMAGES = [
    {
        "url": "https://images.unsplash.com/photo-1597155483629-a55bcccce5c7?w=1200",
        "filename": "pothole_01.jpg",
        "description": "Large pothole in asphalt road"
    },
    {
        "url": "https://images.unsplash.com/photo-1621544402532-00f7d6ee6e9d?w=1200",
        "filename": "road_crack_01.jpg",
        "description": "Cracked pavement"
    },
    {
        "url": "https://images.unsplash.com/photo-1558618666-fcd25c85cd64?w=1200",
        "filename": "road_01.jpg",
        "description": "Clean asphalt road"
    },
    {
        "url": "https://images.unsplash.com/photo-1449034446853-66c86144b0ad?w=1200",
        "filename": "road_02.jpg",
        "description": "Highway road surface"
    },
]

# Pixabay images (CC0 license - free for commercial use)
# NOTE(review): Pixabay "/get/..." URLs look like session-scoped download
# tokens and may expire — confirm this link still resolves.
PIXABAY_IMAGES = [
    {
        "url": "https://pixabay.com/get/gf8f2bdb5e6d7fd9b6e7e35e8481e93c1ff5f0e2d1b7a6c4b8b7e7d5e1b7d8c4c_1280.jpg",
        "filename": "pothole_02.jpg",
        "description": "Road pothole damage"
    },
]


def download_image(url, output_path, description):
    """Download an image from URL.

    Parameters:
        url: direct image URL to fetch.
        output_path: destination file path (written in binary mode).
        description: human-readable label, used only for progress output.

    Returns:
        True on success, False on any expected failure. Errors are printed
        rather than raised so that one bad URL does not abort the batch.
    """
    try:
        print(f"Downloading: {description}")
        print(f" URL: {url}")
        print(f" Output: {output_path}")
        # Some image hosts reject requests without a browser-like User-Agent.
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36'
        }
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        with open(output_path, 'wb') as f:
            f.write(response.content)
        print(f" ✅ Downloaded ({len(response.content)} bytes)")
        return True
    except (requests.RequestException, OSError) as e:
        # Narrowed from a bare `except Exception`: the try-body can only fail
        # with network/HTTP errors (requests) or filesystem errors (OSError).
        print(f" ❌ Failed: {e}")
        return False


def main():
    """Download all test images, skipping any that already exist on disk."""
    print("="*80)
    print("Downloading Test Images for SAM3")
    print("="*80)
    print(f"Output directory: {OUTPUT_DIR}")
    print()

    all_images = UNSPLASH_IMAGES + PIXABAY_IMAGES
    successful = 0
    failed = 0

    for image_info in all_images:
        output_path = OUTPUT_DIR / image_info["filename"]

        # Skip if already exists (counted as success; `continue` also skips
        # the throttling sleep below, since no request was made).
        if output_path.exists():
            print(f"Skipping {image_info['filename']} (already exists)")
            successful += 1
            continue

        if download_image(image_info["url"], output_path, image_info["description"]):
            successful += 1
        else:
            failed += 1

        # Be respectful to servers
        time.sleep(1)

    print()
    print("="*80)
    print("Download Summary")
    print("="*80)
    print(f"Total: {len(all_images)}")
    print(f"Successful: {successful}")
    print(f"Failed: {failed}")


if __name__ == "__main__":
    main()