File size: 5,589 Bytes
f1a83d5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 |
#!/usr/bin/env python3
"""
Test script for Zero GPU functionality
"""
import os
import sys
import torch
import gradio as gr
def test_environment():
    """Report the Zero GPU environment: CUDA state, env vars, spaces module."""
    banner = "=" * 60
    print(banner)
    print("Testing Zero GPU Environment")
    print(banner)

    # CUDA availability, plus basic device/memory stats when a GPU is present.
    has_cuda = torch.cuda.is_available()
    print(f"β CUDA Available: {has_cuda}")
    if has_cuda:
        print(f" - Device Name: {torch.cuda.get_device_name()}")
        print(f" - Device Capability: {torch.cuda.get_device_capability()}")
        print(f" - Memory Allocated: {torch.cuda.memory_allocated() / 1024**2:.2f} MB")
        print(f" - Memory Cached: {torch.cuda.memory_reserved() / 1024**2:.2f} MB")

    # Dump the Zero-GPU-related environment variables in a fixed order.
    print("\nβ Environment Variables:")
    for var in ("ZERO_GPU_MODE", "SPACES_ZERO_GPU", "MAX_IMAGES",
                "MAX_RESOLUTION", "BATCH_SIZE", "USE_FLOAT16"):
        print(f" - {var}: {os.environ.get(var, 'not set')}")

    # The `spaces` package is optional; only report whether it is importable
    # and whether it exposes the GPU decorator.
    try:
        import spaces
        print("\nβ Spaces module: Available")
        print(f" - Has GPU decorator: {hasattr(spaces, 'GPU')}")
    except ImportError:
        print("\nβ Spaces module: Not available (install with 'pip install spaces')")
    print("\n" + "=" * 60)
def test_model_loading():
    """Exercise the mvp module's ModelManager under Zero GPU settings."""
    print("\nTesting Model Loading...")
    divider = "-" * 60
    print(divider)

    # Force Zero-GPU-style configuration; mvp presumably reads these at
    # import time, so they are set before the import below.
    for key, value in (("ZERO_GPU_MODE", "1"), ("MAX_IMAGES", "10"),
                       ("BATCH_SIZE", "2"), ("USE_FLOAT16", "1")):
        os.environ[key] = value

    try:
        import mvp
        print("β MVP module imported successfully")
        if hasattr(mvp, 'model_manager'):
            print("β ModelManager instance available")
            print(f" - Device: {mvp.model_manager.device}")
            # Module-level config values, printed in a fixed order.
            for name in ("USE_FLOAT16", "MAX_IMAGES", "BATCH_SIZE"):
                print(f" - {name}: {getattr(mvp, name)}")
        else:
            print("β ModelManager not found in mvp module")
    except ImportError as e:
        print(f"β Failed to import mvp module: {e}")
    except Exception as e:
        # Any other failure while poking at the manager is reported, not raised.
        print(f"β Error during model loading test: {e}")
    print(divider)
def test_cpu_mode():
    """Test CPU fallback mode by hiding CUDA devices via CUDA_VISIBLE_DEVICES.

    Always restores the caller's CUDA_VISIBLE_DEVICES on exit, including the
    case where it was originally set to an empty string.
    """
    print("\nTesting CPU Fallback Mode...")
    print("-" * 60)

    # Remember the exact prior state; None means "was not set at all".
    original_cuda = os.environ.get("CUDA_VISIBLE_DEVICES")
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    # NOTE(review): torch is imported at module level, so if CUDA was already
    # initialized earlier in this process, changing this env var may have no
    # effect and torch.cuda.is_available() can still report True — confirm, or
    # run this check in a fresh subprocess for true isolation.
    try:
        cuda_available = torch.cuda.is_available()
        print(f"β CUDA disabled: {not cuda_available}")
        # Enable the CPU fallback flags before importing mvp.
        os.environ["ZOO3D_ALLOW_CPU"] = "1"
        os.environ["ZOO3D_CPU_DUMMY"] = "1"
        import mvp
        print("β MVP module works in CPU mode")
        print(f" - ZOO3D_ALLOW_CPU: {mvp.ZOO3D_ALLOW_CPU}")
        print(f" - ZOO3D_CPU_DUMMY: {mvp.ZOO3D_CPU_DUMMY}")
    except Exception as e:
        print(f"β CPU mode test failed: {e}")
    finally:
        # Bug fix: the old check `if original_cuda:` treated an originally
        # empty string as "unset" and dropped it; compare against None so an
        # empty value is restored faithfully.
        if original_cuda is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = original_cuda
        else:
            os.environ.pop("CUDA_VISIBLE_DEVICES", None)
    print("-" * 60)
def test_memory_optimization():
    """Compare FP16 vs FP32 tensor memory use and report batch-splitting math."""
    print("\nTesting Memory Optimization...")
    divider = "-" * 60
    print(divider)

    # These checks allocate on the GPU; skip entirely on CPU-only hosts.
    if not torch.cuda.is_available():
        print("β Skipping GPU memory tests (CUDA not available)")
        return

    print("β Testing float16 precision:")
    full_precision = torch.randn(100, 100, device='cuda')
    half_precision = full_precision.half()
    bytes_fp32 = full_precision.element_size() * full_precision.nelement()
    bytes_fp16 = half_precision.element_size() * half_precision.nelement()
    print(f" - FP32 memory: {bytes_fp32 / 1024:.2f} KB")
    print(f" - FP16 memory: {bytes_fp16 / 1024:.2f} KB")
    print(f" - Memory saved: {(1 - bytes_fp16 / bytes_fp32) * 100:.1f}%")

    print("\nβ Testing batch processing:")
    batch = int(os.environ.get("BATCH_SIZE", "2"))
    total = 10
    print(f" - Total images: {total}")
    print(f" - Batch size: {batch}")
    # Ceiling division: batches needed to cover every image.
    print(f" - Number of batches: {(total + batch - 1) // batch}")

    # Drop the test tensors and hand cached blocks back to the CUDA driver.
    del full_precision, half_precision
    torch.cuda.empty_cache()
    print(divider)
def main():
    """Run the full Zero GPU compatibility suite, then print usage hints."""
    print("\nπ Zoo3D Zero GPU Compatibility Test Suite\n")

    # Each test is self-contained and prints its own pass/fail report.
    test_environment()
    test_model_loading()
    test_cpu_mode()
    test_memory_optimization()

    # Bug fix: this literal was broken across two physical source lines,
    # which is a SyntaxError for a single-quoted string; rejoined on one line.
    print("\nβ All tests completed!")
    print("\nTo test in different modes, try:")
    print(" - Zero GPU mode: ZERO_GPU_MODE=1 python test_zero_gpu.py")
    print(" - CPU mode: CUDA_VISIBLE_DEVICES='' python test_zero_gpu.py")
    print(" - Normal GPU: python test_zero_gpu.py")
# Script entry point: run the whole test suite when executed directly.
if __name__ == "__main__":
    main()