# Repository-page header captured along with the file (not executable code):
# zoo3d / test_zero_gpu.py — commit f1a83d5 ("deploy w zero GPU", by bulatko)
#!/usr/bin/env python3
"""
Test script for Zero GPU functionality
"""
import os
import sys
import torch
import gradio as gr
def test_environment():
    """Print a diagnostic report of the Zero GPU environment.

    Covers three areas: CUDA availability (plus device/memory stats when a
    GPU is present), the Zero GPU-related environment variables, and whether
    the `spaces` module is importable.
    """
    banner = "=" * 60
    print(banner)
    print("Testing Zero GPU Environment")
    print(banner)

    # CUDA status; device details only make sense when a GPU is visible.
    has_cuda = torch.cuda.is_available()
    print(f"βœ“ CUDA Available: {has_cuda}")
    if has_cuda:
        print(f" - Device Name: {torch.cuda.get_device_name()}")
        print(f" - Device Capability: {torch.cuda.get_device_capability()}")
        print(f" - Memory Allocated: {torch.cuda.memory_allocated() / 1024**2:.2f} MB")
        print(f" - Memory Cached: {torch.cuda.memory_reserved() / 1024**2:.2f} MB")

    # Dump each Zero GPU-related env var, falling back to "not set".
    print("\nβœ“ Environment Variables:")
    var_names = (
        "ZERO_GPU_MODE",
        "SPACES_ZERO_GPU",
        "MAX_IMAGES",
        "MAX_RESOLUTION",
        "BATCH_SIZE",
        "USE_FLOAT16",
    )
    for name in var_names:
        print(f" - {name}: {os.environ.get(name, 'not set')}")

    # The `spaces` package is optional; report either way.
    try:
        import spaces
    except ImportError:
        print("\nβœ— Spaces module: Not available (install with 'pip install spaces')")
    else:
        print("\nβœ“ Spaces module: Available")
        print(f" - Has GPU decorator: {hasattr(spaces, 'GPU')}")

    print("\n" + banner)
def test_model_loading():
    """Exercise ModelManager configuration under forced Zero GPU settings.

    Sets the Zero GPU env vars, imports the `mvp` module, and prints the
    resulting manager configuration. Import or runtime failures are caught
    and reported rather than raised.
    """
    print("\nTesting Model Loading...")
    divider = "-" * 60
    print(divider)

    # Force Zero GPU configuration before the module under test is imported.
    test_settings = {
        "ZERO_GPU_MODE": "1",
        "MAX_IMAGES": "10",
        "BATCH_SIZE": "2",
        "USE_FLOAT16": "1",
    }
    for name, value in test_settings.items():
        os.environ[name] = value

    try:
        import mvp
    except ImportError as e:
        print(f"βœ— Failed to import mvp module: {e}")
    else:
        try:
            print("βœ“ MVP module imported successfully")
            if not hasattr(mvp, 'model_manager'):
                print("βœ— ModelManager not found in mvp module")
            else:
                print("βœ“ ModelManager instance available")
                manager = mvp.model_manager
                print(f" - Device: {manager.device}")
                print(f" - USE_FLOAT16: {mvp.USE_FLOAT16}")
                print(f" - MAX_IMAGES: {mvp.MAX_IMAGES}")
                print(f" - BATCH_SIZE: {mvp.BATCH_SIZE}")
        except Exception as e:
            print(f"βœ— Error during model loading test: {e}")
    print(divider)
def test_cpu_mode():
    """Test CPU fallback mode.

    Temporarily hides CUDA devices via ``CUDA_VISIBLE_DEVICES``, enables
    the CPU fallback flags, and tries importing the ``mvp`` module. The
    previous ``CUDA_VISIBLE_DEVICES`` value is always restored afterwards —
    including when it was the empty string, which is itself meaningful (it
    hides all GPUs) and must not be confused with "unset".
    """
    print("\nTesting CPU Fallback Mode...")
    print("-" * 60)
    # Remember the exact prior state; None means the variable was unset.
    original_cuda = os.environ.get("CUDA_VISIBLE_DEVICES")
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    try:
        # NOTE(review): if torch already initialized CUDA, changing the env
        # var here may not affect is_available() — acceptable for reporting.
        cuda_available = torch.cuda.is_available()
        print(f"βœ“ CUDA disabled: {not cuda_available}")
        # Flags read by the mvp module to allow/stub CPU execution.
        os.environ["ZOO3D_ALLOW_CPU"] = "1"
        os.environ["ZOO3D_CPU_DUMMY"] = "1"
        import mvp
        print("βœ“ MVP module works in CPU mode")
        print(f" - ZOO3D_ALLOW_CPU: {mvp.ZOO3D_ALLOW_CPU}")
        print(f" - ZOO3D_CPU_DUMMY: {mvp.ZOO3D_CPU_DUMMY}")
    except Exception as e:
        print(f"βœ— CPU mode test failed: {e}")
    finally:
        # BUG FIX: the previous `if original_cuda:` truthiness check treated
        # a pre-existing empty string as "unset" and dropped it. Compare
        # against None so any prior value (including "") is restored.
        if original_cuda is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = original_cuda
        else:
            os.environ.pop("CUDA_VISIBLE_DEVICES", None)
    print("-" * 60)
def test_memory_optimization():
    """Demonstrate float16 memory savings and batch-count arithmetic.

    Skips entirely when CUDA is unavailable; otherwise compares FP32 vs
    FP16 tensor footprints and prints how many batches a fixed workload
    of 10 images needs at the configured BATCH_SIZE.
    """
    print("\nTesting Memory Optimization...")
    divider = "-" * 60
    print(divider)

    # The measurements below allocate on the GPU, so bail out without one.
    if not torch.cuda.is_available():
        print("⚠ Skipping GPU memory tests (CUDA not available)")
        return

    print("βœ“ Testing float16 precision:")
    fp32 = torch.randn(100, 100, device='cuda')
    fp16 = fp32.half()
    bytes_fp32 = fp32.element_size() * fp32.nelement()
    bytes_fp16 = fp16.element_size() * fp16.nelement()
    print(f" - FP32 memory: {bytes_fp32 / 1024:.2f} KB")
    print(f" - FP16 memory: {bytes_fp16 / 1024:.2f} KB")
    print(f" - Memory saved: {(1 - bytes_fp16/bytes_fp32) * 100:.1f}%")

    print("\nβœ“ Testing batch processing:")
    batch_size = int(os.environ.get("BATCH_SIZE", "2"))
    total_images = 10
    # Ceiling division: last batch may be partial.
    num_batches = -(-total_images // batch_size)
    print(f" - Total images: {total_images}")
    print(f" - Batch size: {batch_size}")
    print(f" - Number of batches: {num_batches}")

    # Free the test tensors and return cached memory to the allocator.
    del fp32, fp16
    torch.cuda.empty_cache()
    print(divider)
def main():
    """Run every compatibility check in order, then print usage hints."""
    print("\nπŸš€ Zoo3D Zero GPU Compatibility Test Suite\n")

    # Order matters: environment first, then the heavier module tests.
    checks = (
        test_environment,
        test_model_loading,
        test_cpu_mode,
        test_memory_optimization,
    )
    for check in checks:
        check()

    print("\nβœ… All tests completed!")
    print("\nTo test in different modes, try:")
    print(" - Zero GPU mode: ZERO_GPU_MODE=1 python test_zero_gpu.py")
    print(" - CPU mode: CUDA_VISIBLE_DEVICES='' python test_zero_gpu.py")
    print(" - Normal GPU: python test_zero_gpu.py")


if __name__ == "__main__":
    main()