#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ SimNICT Dataset Batch Downloader Download complete SimNICT datasets from Internet Archive IMPORTANT: This downloader provides access to 8 out of 10 original SimNICT datasets. AutoPET and HECKTOR22 are excluded from public release due to licensing restrictions. Usage: python download_simnict.py --datasets AMOS COVID_19_NY_SBU --output_dir ./data python download_simnict.py --all --output_dir ./data python download_simnict.py --list # Show available datasets Author: TAMP Research Group Version: 1.0 """ import os import sys import argparse import time from pathlib import Path from typing import List, Dict, Optional import logging try: import internetarchive as ia except ImportError: print("āŒ Error: internetarchive library not found") print("Please install it using: pip install internetarchive") sys.exit(1) # ============================================================================= # Dataset Configuration # ============================================================================= SIMNICT_DATASETS = { "AMOS": { "identifier": "simnict-amos", "description": "Abdominal multi-organ segmentation dataset", "volumes": 500, "files": 504, "size_gb": "~22 GB" }, "COVID_19_NY_SBU": { "identifier": "simnict-covid-19-ny-sbu", "description": "COVID-19 NY-SBU chest CT dataset", "volumes": 459, "files": 463, "size_gb": "~30 GB" }, "CT_Images_COVID19": { "identifier": "simnict-ct-images-in-covid-19", "description": "CT Images in COVID-19 dataset", "volumes": 771, "files": 775, "size_gb": "~13 GB" }, "CT_COLONOGRAPHY": { "identifier": "simnict-ct-colonography", "description": "CT colonography screening dataset", "volumes": 1730, "files": 1734, "size_gb": "~271 GB" }, "LNDb": { "identifier": "simnict-lndb", "description": "Lung nodule database", "volumes": 294, "files": 298, "size_gb": "~34 GB" }, "LUNA": { "identifier": "simnict-luna", "description": "Lung nodule analysis dataset", "volumes": 888, "files": 892, "size_gb": "~63 GB" }, "MELA": { "identifier": "simnict-mela", "description": "Melanoma detection dataset", "volumes": 1100, "files": 1104, "size_gb": "~147 GB" }, "STOIC": { "identifier": "simnict-stoic", "description": "COVID-19 AI challenge dataset", "volumes": 2000, "files": 2004, "size_gb": "~243 GB" } } # ============================================================================= # Logging Configuration # ============================================================================= logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[ logging.FileHandler('simnict_download.log'), logging.StreamHandler() ] ) logger = logging.getLogger(__name__) # ============================================================================= # SimNICT Downloader Class # ============================================================================= class SimNICTDownloader: def __init__(self, output_dir: str = "./simnict_data", max_retries: int = 3, chunk_size: int = 1024*1024): """ Initialize SimNICT downloader Args: output_dir: Directory to save downloaded datasets max_retries: Maximum retry attempts for failed downloads chunk_size: Download chunk size in bytes (default 1MB) """ self.output_dir = Path(output_dir) self.max_retries = max_retries self.chunk_size = chunk_size # Create output directory self.output_dir.mkdir(parents=True, exist_ok=True) logger.info(f"šŸ“ Output directory: {self.output_dir.absolute()}") def list_available_datasets(self) -> None: """Display all available SimNICT datasets""" print("\n" + "="*80) print("šŸ“‹ Available SimNICT Datasets (8 out of 10 original datasets)") print("="*80) print("ā„¹ļø Note: AutoPET and HECKTOR22 excluded due to licensing restrictions") print("="*80) total_size = 0 total_volumes = 0 for name, info in SIMNICT_DATASETS.items(): print(f"\nšŸ”¹ {name}") print(f" šŸ“ Description: {info['description']}") print(f" šŸ“Š Volumes: {info['volumes']:,}") print(f" šŸ“„ Files: {info['files']:,}") print(f" šŸ’¾ Size: {info['size_gb']}") print(f" šŸ·ļø ID: {info['identifier']}") print(f" šŸ”— URL: https://archive.org/details/{info['identifier']}") total_volumes += info['volumes'] # Extract numeric size for total calculation size_str = info['size_gb'].replace('~', '').replace(' GB', '') try: total_size += float(size_str) except: pass print(f"\nšŸ“ˆ Total Statistics:") print(f" šŸ—‚ļø Datasets: {len(SIMNICT_DATASETS)}") print(f" šŸ“Š Total Volumes: {total_volumes:,}") print(f" šŸ’¾ Total Size: ~{total_size:.0f} GB") print("="*80) def check_dataset_exists(self, identifier: str) -> bool: """Check if dataset exists on Internet Archive""" try: item = ia.get_item(identifier) return item.exists except Exception as e: logger.error(f"Error checking dataset {identifier}: {e}") return False def get_dataset_files(self, identifier: str) -> List[str]: """Get list of files in a dataset""" try: item = ia.get_item(identifier) if not item.exists: return [] files = [] for file_obj in item.files: if isinstance(file_obj, dict) and 'name' in file_obj: # Only include .nii.gz files (skip metadata) filename = file_obj['name'] if filename.endswith('.nii.gz'): files.append(filename) return sorted(files) except Exception as e: logger.error(f"Error getting files for {identifier}: {e}") return [] def download_dataset(self, dataset_name: str, resume: bool = True, verify_checksum: bool = True) -> bool: """ Download a specific SimNICT dataset Args: dataset_name: Name of dataset to download resume: Whether to resume partial downloads verify_checksum: Whether to verify file checksums Returns: True if download successful, False otherwise """ if dataset_name not in SIMNICT_DATASETS: logger.error(f"āŒ Unknown dataset: {dataset_name}") logger.info(f"Available datasets: {list(SIMNICT_DATASETS.keys())}") return False dataset_info = SIMNICT_DATASETS[dataset_name] identifier = dataset_info['identifier'] logger.info(f"\n{'='*60}") logger.info(f"šŸ“¤ Starting download: {dataset_name}") logger.info(f"šŸ·ļø Identifier: {identifier}") logger.info(f"šŸ“Š Expected volumes: {dataset_info['volumes']}") logger.info(f"šŸ’¾ Estimated size: {dataset_info['size_gb']}") logger.info(f"{'='*60}") # Check if dataset exists if not self.check_dataset_exists(identifier): logger.error(f"āŒ Dataset not found on Internet Archive: {identifier}") return False # Create dataset directory dataset_dir = self.output_dir / dataset_name dataset_dir.mkdir(exist_ok=True) # Get files to download files_to_download = self.get_dataset_files(identifier) if not files_to_download: logger.error(f"āŒ No files found for dataset: {dataset_name}") return False logger.info(f"šŸ“‹ Found {len(files_to_download)} files to download") # Check existing files if resuming existing_files = set() if resume: for file_path in dataset_dir.iterdir(): if file_path.is_file() and file_path.suffix == '.gz': existing_files.add(file_path.name) if existing_files: logger.info(f"šŸ“‚ Found {len(existing_files)} existing files (resume mode)") # Download files successful_downloads = 0 failed_downloads = 0 skipped_files = 0 for i, filename in enumerate(files_to_download, 1): file_path = dataset_dir / filename # Skip if file exists and resuming if resume and filename in existing_files: logger.info(f"ā­ļø Skipping existing file [{i}/{len(files_to_download)}]: {filename}") skipped_files += 1 continue logger.info(f"šŸ“„ Downloading [{i}/{len(files_to_download)}]: {filename}") success = self._download_file_with_retry( identifier, filename, file_path, verify_checksum ) if success: successful_downloads += 1 logger.info(f"āœ… Downloaded: {filename}") else: failed_downloads += 1 logger.error(f"āŒ Failed: {filename}") # Brief pause between downloads time.sleep(0.5) # Summary logger.info(f"\nšŸ“Š Download Summary for {dataset_name}:") logger.info(f" āœ… Successful: {successful_downloads}") logger.info(f" ā­ļø Skipped: {skipped_files}") logger.info(f" āŒ Failed: {failed_downloads}") logger.info(f" šŸ“ Location: {dataset_dir.absolute()}") return failed_downloads == 0 def _download_file_with_retry(self, identifier: str, filename: str, file_path: Path, verify_checksum: bool) -> bool: """Download single file with retry logic""" for attempt in range(self.max_retries): try: # Use internetarchive library to download item = ia.get_item(identifier) # Find the file object file_obj = None for f in item.files: if isinstance(f, dict) and f.get('name') == filename: file_obj = f break if not file_obj: logger.error(f"File not found in item: {filename}") return False # Download the file success = item.download( files=[filename], destdir=file_path.parent, verify=verify_checksum, verbose=False, retries=1 # Handle retries at our level ) if success and file_path.exists(): return True else: raise Exception("Download failed or file not created") except Exception as e: logger.warning(f"āš ļø Attempt {attempt + 1}/{self.max_retries} failed for {filename}: {e}") if attempt < self.max_retries - 1: wait_time = (attempt + 1) * 2 # Exponential backoff logger.info(f"šŸ”„ Retrying in {wait_time} seconds...") time.sleep(wait_time) else: logger.error(f"šŸ’” All {self.max_retries} attempts failed for {filename}") return False return False def download_multiple_datasets(self, dataset_names: List[str], resume: bool = True) -> Dict[str, bool]: """ Download multiple SimNICT datasets Args: dataset_names: List of dataset names to download resume: Whether to resume partial downloads Returns: Dictionary mapping dataset names to success status """ if not dataset_names: logger.error("āŒ No datasets specified") return {} logger.info(f"\nšŸš€ Starting batch download of {len(dataset_names)} datasets") logger.info(f"šŸ“‹ Datasets: {', '.join(dataset_names)}") results = {} successful = 0 for i, dataset_name in enumerate(dataset_names, 1): logger.info(f"\n{'šŸ”„' * 20} Dataset {i}/{len(dataset_names)} {'šŸ”„' * 20}") success = self.download_dataset(dataset_name, resume=resume) results[dataset_name] = success if success: successful += 1 logger.info(f"šŸŽ‰ Successfully downloaded: {dataset_name}") else: logger.error(f"šŸ’” Failed to download: {dataset_name}") # Final summary logger.info(f"\n{'=' * 80}") logger.info(f"šŸ Batch Download Complete") logger.info(f"{'=' * 80}") logger.info(f"āœ… Successful: {successful}/{len(dataset_names)}") logger.info(f"āŒ Failed: {len(dataset_names) - successful}") for dataset_name, success in results.items(): status = "āœ…" if success else "āŒ" logger.info(f" {status} {dataset_name}") return results def validate_downloads(self, dataset_names: List[str]) -> Dict[str, Dict]: """ Validate downloaded datasets Args: dataset_names: List of dataset names to validate Returns: Validation results for each dataset """ logger.info(f"\nšŸ” Validating {len(dataset_names)} datasets...") results = {} for dataset_name in dataset_names: if dataset_name not in SIMNICT_DATASETS: continue dataset_dir = self.output_dir / dataset_name expected_info = SIMNICT_DATASETS[dataset_name] if not dataset_dir.exists(): results[dataset_name] = { "status": "missing", "message": "Dataset directory not found" } continue # Count downloaded files nii_files = list(dataset_dir.glob("*.nii.gz")) file_count = len(nii_files) expected_files = expected_info['files'] completion_rate = (file_count / expected_files) * 100 if file_count == expected_files: status = "complete" message = f"All {file_count} files downloaded successfully" elif file_count > 0: status = "partial" message = f"Partial download: {file_count}/{expected_files} files ({completion_rate:.1f}%)" else: status = "empty" message = "No files found" results[dataset_name] = { "status": status, "files_found": file_count, "files_expected": expected_files, "completion_rate": completion_rate, "message": message } logger.info(f"šŸ“Š {dataset_name}: {message}") return results # ============================================================================= # Command Line Interface # ============================================================================= def main(): parser = argparse.ArgumentParser( description="Download SimNICT datasets from Internet Archive", formatter_class=argparse.RawDescriptionHelpFormatter, epilog=""" Examples: # List available datasets python download_simnict.py --list # Download specific datasets python download_simnict.py --datasets AMOS COVID_19_NY_SBU --output_dir ./data # Download all datasets python download_simnict.py --all --output_dir ./data # Resume interrupted downloads python download_simnict.py --datasets STOIC --resume --output_dir ./data # Validate existing downloads python download_simnict.py --validate AMOS LUNA --output_dir ./data """ ) parser.add_argument( "--datasets", nargs="+", metavar="DATASET", help="List of datasets to download (e.g., AMOS LUNA STOIC)" ) parser.add_argument( "--all", action="store_true", help="Download all available SimNICT datasets" ) parser.add_argument( "--list", action="store_true", help="List available datasets and exit" ) parser.add_argument( "--validate", nargs="*", metavar="DATASET", help="Validate downloaded datasets" ) parser.add_argument( "--output_dir", default="./simnict_data", help="Output directory for downloads (default: ./simnict_data)" ) parser.add_argument( "--resume", action="store_true", help="Resume interrupted downloads (skip existing files)" ) parser.add_argument( "--no-checksum", action="store_true", help="Skip checksum verification (faster but less safe)" ) parser.add_argument( "--max-retries", type=int, default=3, help="Maximum retry attempts for failed downloads (default: 3)" ) args = parser.parse_args() # Handle list command if args.list: downloader = SimNICTDownloader() downloader.list_available_datasets() return # Handle validation if args.validate is not None: datasets_to_validate = args.validate if args.validate else list(SIMNICT_DATASETS.keys()) downloader = SimNICTDownloader(args.output_dir) results = downloader.validate_downloads(datasets_to_validate) return # Determine datasets to download if args.all: datasets = list(SIMNICT_DATASETS.keys()) elif args.datasets: datasets = args.datasets else: parser.error("Must specify --datasets, --all, --list, or --validate") # Validate dataset names invalid_datasets = [d for d in datasets if d not in SIMNICT_DATASETS] if invalid_datasets: logger.error(f"āŒ Invalid dataset names: {invalid_datasets}") logger.info(f"Available datasets: {list(SIMNICT_DATASETS.keys())}") return # Initialize downloader downloader = SimNICTDownloader( output_dir=args.output_dir, max_retries=args.max_retries ) # Show download plan logger.info(f"\nšŸ“‹ Download Plan:") total_size = 0 for dataset in datasets: info = SIMNICT_DATASETS[dataset] logger.info(f" šŸ”¹ {dataset}: {info['size_gb']} ({info['volumes']} volumes)") # Extract size for total calculation try: size_num = float(info['size_gb'].replace('~', '').replace(' GB', '')) total_size += size_num except: pass logger.info(f" šŸ’¾ Total estimated size: ~{total_size:.0f} GB") # Confirm download try: confirm = input(f"\nProceed with download? (y/N): ").strip().lower() if confirm != 'y': logger.info("āŒ Download cancelled by user") return except KeyboardInterrupt: logger.info("\nāŒ Download cancelled by user") return # Start downloads start_time = time.time() results = downloader.download_multiple_datasets(datasets, resume=args.resume) end_time = time.time() # Final report elapsed = end_time - start_time logger.info(f"\nā±ļø Total time: {elapsed:.1f} seconds ({elapsed/60:.1f} minutes)") # Validate downloads if any(results.values()): logger.info("\nšŸ” Validating downloads...") validation_results = downloader.validate_downloads(list(results.keys())) if __name__ == "__main__": main()