"""
|
|
|
SimNICT Dataset Batch Downloader
|
|
|
Download complete SimNICT datasets from Internet Archive
|
|
|
|
|
|
IMPORTANT: This downloader provides access to 8 out of 10 original SimNICT datasets.
|
|
|
AutoPET and HECKTOR22 are excluded from public release due to licensing restrictions.
|
|
|
|
|
|
Usage:
|
|
|
python download_simnict.py --datasets AMOS COVID_19_NY_SBU --output_dir ./data
|
|
|
python download_simnict.py --all --output_dir ./data
|
|
|
python download_simnict.py --list # Show available datasets
|
|
|
|
|
|
Author: TAMP Research Group
|
|
|
Version: 1.0
|
|
|
"""

import sys
import argparse
import time
from pathlib import Path
from typing import List, Dict
import logging

try:
    import internetarchive as ia
except ImportError:
    print("❌ Error: internetarchive library not found")
    print("Please install it using: pip install internetarchive")
    sys.exit(1)

SIMNICT_DATASETS = {
    "AMOS": {
        "identifier": "simnict-amos",
        "description": "Abdominal multi-organ segmentation dataset",
        "volumes": 500,
        "files": 504,
        "size_gb": "~22 GB"
    },
    "COVID_19_NY_SBU": {
        "identifier": "simnict-covid-19-ny-sbu",
        "description": "COVID-19 NY-SBU chest CT dataset",
        "volumes": 459,
        "files": 463,
        "size_gb": "~30 GB"
    },
    "CT_Images_COVID19": {
        "identifier": "simnict-ct-images-in-covid-19",
        "description": "CT Images in COVID-19 dataset",
        "volumes": 771,
        "files": 775,
        "size_gb": "~13 GB"
    },
    "CT_COLONOGRAPHY": {
        "identifier": "simnict-ct-colonography",
        "description": "CT colonography screening dataset",
        "volumes": 1730,
        "files": 1734,
        "size_gb": "~271 GB"
    },
    "LNDb": {
        "identifier": "simnict-lndb",
        "description": "Lung nodule database",
        "volumes": 294,
        "files": 298,
        "size_gb": "~34 GB"
    },
    "LUNA": {
        "identifier": "simnict-luna",
        "description": "Lung nodule analysis dataset",
        "volumes": 888,
        "files": 892,
        "size_gb": "~63 GB"
    },
"MELA": {
|
|
|
"identifier": "simnict-mela",
|
|
|
"description": "Melanoma detection dataset",
|
|
|
"volumes": 1100,
|
|
|
"files": 1104,
|
|
|
"size_gb": "~147 GB"
|
|
|
},
|
|
|
"STOIC": {
|
|
|
"identifier": "simnict-stoic",
|
|
|
"description": "COVID-19 AI challenge dataset",
|
|
|
"volumes": 2000,
|
|
|
"files": 2004,
|
|
|
"size_gb": "~243 GB"
|
|
|
}
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|


logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('simnict_download.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class SimNICTDownloader:
    def __init__(self, output_dir: str = "./simnict_data",
                 max_retries: int = 3, chunk_size: int = 1024 * 1024):
        """
        Initialize SimNICT downloader.

        Args:
            output_dir: Directory to save downloaded datasets
            max_retries: Maximum retry attempts for failed downloads
            chunk_size: Download chunk size in bytes (default 1 MB)
        """
        self.output_dir = Path(output_dir)
        self.max_retries = max_retries
        self.chunk_size = chunk_size

        self.output_dir.mkdir(parents=True, exist_ok=True)
        logger.info(f"📁 Output directory: {self.output_dir.absolute()}")

    def list_available_datasets(self) -> None:
        """Display all available SimNICT datasets."""
        print("\n" + "=" * 80)
        print("📋 Available SimNICT Datasets (8 out of 10 original datasets)")
        print("=" * 80)
        print("ℹ️ Note: AutoPET and HECKTOR22 excluded due to licensing restrictions")
        print("=" * 80)

        total_size = 0.0
        total_volumes = 0

        for name, info in SIMNICT_DATASETS.items():
            print(f"\n🔹 {name}")
            print(f"   📝 Description: {info['description']}")
            print(f"   📊 Volumes: {info['volumes']:,}")
            print(f"   📄 Files: {info['files']:,}")
            print(f"   💾 Size: {info['size_gb']}")
            print(f"   🏷️ ID: {info['identifier']}")
            print(f"   🔗 URL: https://archive.org/details/{info['identifier']}")

            total_volumes += info['volumes']

            size_str = info['size_gb'].replace('~', '').replace(' GB', '')
            try:
                total_size += float(size_str)
            except ValueError:
                pass

        print("\n📈 Total Statistics:")
        print(f"   🗂️ Datasets: {len(SIMNICT_DATASETS)}")
        print(f"   📊 Total Volumes: {total_volumes:,}")
        print(f"   💾 Total Size: ~{total_size:.0f} GB")
        print("=" * 80)

    def check_dataset_exists(self, identifier: str) -> bool:
        """Check if dataset exists on Internet Archive."""
        try:
            item = ia.get_item(identifier)
            return item.exists
        except Exception as e:
            logger.error(f"Error checking dataset {identifier}: {e}")
            return False

    def get_dataset_files(self, identifier: str) -> List[str]:
        """Get sorted list of .nii.gz files in a dataset."""
        try:
            item = ia.get_item(identifier)
            if not item.exists:
                return []

            files = []
            for file_obj in item.files:
                if isinstance(file_obj, dict) and 'name' in file_obj:
                    filename = file_obj['name']
                    if filename.endswith('.nii.gz'):
                        files.append(filename)

            return sorted(files)
        except Exception as e:
            logger.error(f"Error getting files for {identifier}: {e}")
            return []

    def download_dataset(self, dataset_name: str,
                         resume: bool = True,
                         verify_checksum: bool = True) -> bool:
        """
        Download a specific SimNICT dataset.

        Args:
            dataset_name: Name of dataset to download
            resume: Whether to resume partial downloads
            verify_checksum: Whether to verify file checksums

        Returns:
            True if download successful, False otherwise
        """
        if dataset_name not in SIMNICT_DATASETS:
            logger.error(f"❌ Unknown dataset: {dataset_name}")
            logger.info(f"Available datasets: {list(SIMNICT_DATASETS.keys())}")
            return False

        dataset_info = SIMNICT_DATASETS[dataset_name]
        identifier = dataset_info['identifier']

        logger.info(f"\n{'=' * 60}")
        logger.info(f"📥 Starting download: {dataset_name}")
        logger.info(f"🏷️ Identifier: {identifier}")
        logger.info(f"📊 Expected volumes: {dataset_info['volumes']}")
        logger.info(f"💾 Estimated size: {dataset_info['size_gb']}")
        logger.info(f"{'=' * 60}")

        if not self.check_dataset_exists(identifier):
            logger.error(f"❌ Dataset not found on Internet Archive: {identifier}")
            return False

        dataset_dir = self.output_dir / dataset_name
        dataset_dir.mkdir(exist_ok=True)

        files_to_download = self.get_dataset_files(identifier)
        if not files_to_download:
            logger.error(f"❌ No files found for dataset: {dataset_name}")
            return False

        logger.info(f"📋 Found {len(files_to_download)} files to download")

        # In resume mode, collect already-downloaded volumes so they are skipped.
        existing_files = set()
        if resume:
            for file_path in dataset_dir.iterdir():
                if file_path.is_file() and file_path.name.endswith('.nii.gz'):
                    existing_files.add(file_path.name)

            if existing_files:
                logger.info(f"📂 Found {len(existing_files)} existing files (resume mode)")

        successful_downloads = 0
        failed_downloads = 0
        skipped_files = 0

        for i, filename in enumerate(files_to_download, 1):
            file_path = dataset_dir / filename

            if resume and filename in existing_files:
                logger.info(f"⏭️ Skipping existing file [{i}/{len(files_to_download)}]: {filename}")
                skipped_files += 1
                continue

            logger.info(f"📥 Downloading [{i}/{len(files_to_download)}]: {filename}")

            success = self._download_file_with_retry(
                identifier, filename, file_path, verify_checksum
            )

            if success:
                successful_downloads += 1
                logger.info(f"✅ Downloaded: {filename}")
            else:
                failed_downloads += 1
                logger.error(f"❌ Failed: {filename}")

            # Brief pause between files to avoid hammering the archive.
            time.sleep(0.5)

        logger.info(f"\n📊 Download Summary for {dataset_name}:")
        logger.info(f"   ✅ Successful: {successful_downloads}")
        logger.info(f"   ⏭️ Skipped: {skipped_files}")
        logger.info(f"   ❌ Failed: {failed_downloads}")
        logger.info(f"   📁 Location: {dataset_dir.absolute()}")

        return failed_downloads == 0

    def _download_file_with_retry(self, identifier: str, filename: str,
                                  file_path: Path, verify_checksum: bool) -> bool:
        """Download single file with retry logic and linear backoff."""
        for attempt in range(self.max_retries):
            try:
                item = ia.get_item(identifier)

                file_obj = None
                for f in item.files:
                    if isinstance(f, dict) and f.get('name') == filename:
                        file_obj = f
                        break

                if not file_obj:
                    logger.error(f"File not found in item: {filename}")
                    return False

                # no_directory=True drops the identifier-named subfolder that
                # internetarchive creates under destdir by default, so the file
                # lands exactly at file_path and the existence check below holds.
                item.download(
                    files=[filename],
                    destdir=file_path.parent,
                    no_directory=True,
                    verify=verify_checksum,
                    verbose=False,
                    retries=1
                )

                # Trust the filesystem rather than the return value.
                if file_path.exists():
                    return True
                raise Exception("Download failed or file not created")

            except Exception as e:
                logger.warning(f"⚠️ Attempt {attempt + 1}/{self.max_retries} failed for {filename}: {e}")

                if attempt < self.max_retries - 1:
                    wait_time = (attempt + 1) * 2
                    logger.info(f"🔄 Retrying in {wait_time} seconds...")
                    time.sleep(wait_time)
                else:
                    logger.error(f"💔 All {self.max_retries} attempts failed for {filename}")
                    return False

        return False

    def download_multiple_datasets(self, dataset_names: List[str],
                                   resume: bool = True,
                                   verify_checksum: bool = True) -> Dict[str, bool]:
        """
        Download multiple SimNICT datasets.

        Args:
            dataset_names: List of dataset names to download
            resume: Whether to resume partial downloads
            verify_checksum: Whether to verify file checksums

        Returns:
            Dictionary mapping dataset names to success status
        """
        if not dataset_names:
            logger.error("❌ No datasets specified")
            return {}

        logger.info(f"\n🚀 Starting batch download of {len(dataset_names)} datasets")
        logger.info(f"📋 Datasets: {', '.join(dataset_names)}")

        results = {}
        successful = 0

        for i, dataset_name in enumerate(dataset_names, 1):
            logger.info(f"\n{'🔄' * 20} Dataset {i}/{len(dataset_names)} {'🔄' * 20}")

            success = self.download_dataset(dataset_name, resume=resume,
                                            verify_checksum=verify_checksum)
            results[dataset_name] = success

            if success:
                successful += 1
                logger.info(f"🎉 Successfully downloaded: {dataset_name}")
            else:
                logger.error(f"💔 Failed to download: {dataset_name}")

        logger.info(f"\n{'=' * 80}")
        logger.info("🏁 Batch Download Complete")
        logger.info(f"{'=' * 80}")
        logger.info(f"✅ Successful: {successful}/{len(dataset_names)}")
        logger.info(f"❌ Failed: {len(dataset_names) - successful}")

        for dataset_name, success in results.items():
            status = "✅" if success else "❌"
            logger.info(f"   {status} {dataset_name}")

        return results

    def validate_downloads(self, dataset_names: List[str]) -> Dict[str, Dict]:
        """
        Validate downloaded datasets.

        Args:
            dataset_names: List of dataset names to validate

        Returns:
            Validation results for each dataset
        """
        logger.info(f"\n🔍 Validating {len(dataset_names)} datasets...")

        results = {}

        for dataset_name in dataset_names:
            if dataset_name not in SIMNICT_DATASETS:
                continue

            dataset_dir = self.output_dir / dataset_name
            expected_info = SIMNICT_DATASETS[dataset_name]

            if not dataset_dir.exists():
                results[dataset_name] = {
                    "status": "missing",
                    "message": "Dataset directory not found"
                }
                continue

            nii_files = list(dataset_dir.glob("*.nii.gz"))
            file_count = len(nii_files)

            expected_files = expected_info['files']
            completion_rate = (file_count / expected_files) * 100

            if file_count == expected_files:
                status = "complete"
                message = f"All {file_count} files downloaded successfully"
            elif file_count > 0:
                status = "partial"
                message = f"Partial download: {file_count}/{expected_files} files ({completion_rate:.1f}%)"
            else:
                status = "empty"
                message = "No files found"

            results[dataset_name] = {
                "status": status,
                "files_found": file_count,
                "files_expected": expected_files,
                "completion_rate": completion_rate,
                "message": message
            }

            logger.info(f"📊 {dataset_name}: {message}")

        return results


def main():
    parser = argparse.ArgumentParser(
        description="Download SimNICT datasets from Internet Archive",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # List available datasets
  python download_simnict.py --list

  # Download specific datasets
  python download_simnict.py --datasets AMOS COVID_19_NY_SBU --output_dir ./data

  # Download all datasets
  python download_simnict.py --all --output_dir ./data

  # Resume interrupted downloads
  python download_simnict.py --datasets STOIC --resume --output_dir ./data

  # Validate existing downloads
  python download_simnict.py --validate AMOS LUNA --output_dir ./data
"""
    )

    parser.add_argument(
        "--datasets", nargs="+", metavar="DATASET",
        help="List of datasets to download (e.g., AMOS LUNA STOIC)"
    )
    parser.add_argument(
        "--all", action="store_true",
        help="Download all available SimNICT datasets"
    )
    parser.add_argument(
        "--list", action="store_true",
        help="List available datasets and exit"
    )
    parser.add_argument(
        "--validate", nargs="*", metavar="DATASET",
        help="Validate downloaded datasets (no names = validate all)"
    )
    parser.add_argument(
        "--output_dir", default="./simnict_data",
        help="Output directory for downloads (default: ./simnict_data)"
    )
    parser.add_argument(
        "--resume", action="store_true",
        help="Resume interrupted downloads (skip existing files)"
    )
    parser.add_argument(
        "--no-checksum", action="store_true",
        help="Skip checksum verification (faster but less safe)"
    )
    parser.add_argument(
        "--max-retries", type=int, default=3,
        help="Maximum retry attempts for failed downloads (default: 3)"
    )

    args = parser.parse_args()

    if args.list:
        downloader = SimNICTDownloader()
        downloader.list_available_datasets()
        return

    # --validate with no names validates every known dataset.
    if args.validate is not None:
        datasets_to_validate = args.validate if args.validate else list(SIMNICT_DATASETS.keys())
        downloader = SimNICTDownloader(args.output_dir)
        downloader.validate_downloads(datasets_to_validate)
        return

    if args.all:
        datasets = list(SIMNICT_DATASETS.keys())
    elif args.datasets:
        datasets = args.datasets
    else:
        parser.error("Must specify --datasets, --all, --list, or --validate")

    invalid_datasets = [d for d in datasets if d not in SIMNICT_DATASETS]
    if invalid_datasets:
        logger.error(f"❌ Invalid dataset names: {invalid_datasets}")
        logger.info(f"Available datasets: {list(SIMNICT_DATASETS.keys())}")
        return

    downloader = SimNICTDownloader(
        output_dir=args.output_dir,
        max_retries=args.max_retries
    )

    logger.info("\n📋 Download Plan:")
    total_size = 0.0
    for dataset in datasets:
        info = SIMNICT_DATASETS[dataset]
        logger.info(f"   🔹 {dataset}: {info['size_gb']} ({info['volumes']} volumes)")

        try:
            total_size += float(info['size_gb'].replace('~', '').replace(' GB', ''))
        except ValueError:
            pass

    logger.info(f"   💾 Total estimated size: ~{total_size:.0f} GB")

    try:
        confirm = input("\nProceed with download? (y/N): ").strip().lower()
        if confirm != 'y':
            logger.info("❌ Download cancelled by user")
            return
    except KeyboardInterrupt:
        logger.info("\n❌ Download cancelled by user")
        return

    start_time = time.time()
    results = downloader.download_multiple_datasets(datasets, resume=args.resume,
                                                    verify_checksum=not args.no_checksum)
    elapsed = time.time() - start_time

    logger.info(f"\n⏱️ Total time: {elapsed:.1f} seconds ({elapsed/60:.1f} minutes)")

    if any(results.values()):
        logger.info("\n🔍 Validating downloads...")
        downloader.validate_downloads(list(results.keys()))


if __name__ == "__main__":
    main()