|
|
|
|
|
""" |
|
|
Test script for Zero GPU functionality |
|
|
""" |
|
|
|
|
|
import os |
|
|
import sys |
|
|
import torch |
|
|
import gradio as gr |
|
|
|
|
|
def test_environment():
    """Print a report on the runtime environment's Zero GPU readiness.

    Covers CUDA availability/device details, the Zero-GPU-related
    environment variables, and whether the `spaces` module is installed.
    """
    banner = "=" * 60
    print(banner)
    print("Testing Zero GPU Environment")
    print(banner)

    has_cuda = torch.cuda.is_available()
    print(f"β CUDA Available: {has_cuda}")

    if has_cuda:
        # Device details are only queryable once CUDA is confirmed present.
        print(f" - Device Name: {torch.cuda.get_device_name()}")
        print(f" - Device Capability: {torch.cuda.get_device_capability()}")
        print(f" - Memory Allocated: {torch.cuda.memory_allocated() / 1024**2:.2f} MB")
        print(f" - Memory Cached: {torch.cuda.memory_reserved() / 1024**2:.2f} MB")

    print("\nβ Environment Variables:")
    # Same keys, same "not set" fallback as before — just driven by a loop.
    for var in ("ZERO_GPU_MODE", "SPACES_ZERO_GPU", "MAX_IMAGES",
                "MAX_RESOLUTION", "BATCH_SIZE", "USE_FLOAT16"):
        print(f" - {var}: {os.environ.get(var, 'not set')}")

    # The `spaces` package is only present on HF Spaces (or when installed).
    try:
        import spaces
    except ImportError:
        print("\nβ Spaces module: Not available (install with 'pip install spaces')")
    else:
        print("\nβ Spaces module: Available")
        print(f" - Has GPU decorator: {hasattr(spaces, 'GPU')}")

    print("\n" + banner)
|
|
|
|
|
def test_model_loading():
    """Import the `mvp` module with Zero GPU settings applied and report on it.

    Sets the Zero-GPU environment variables first, then checks the module's
    `model_manager` and configuration attributes; all failures are printed
    rather than raised.
    """
    print("\nTesting Model Loading...")
    divider = "-" * 60
    print(divider)

    # Configure Zero GPU behavior before the module under test is imported,
    # since `mvp` reads these at import time.
    os.environ.update({
        "ZERO_GPU_MODE": "1",
        "MAX_IMAGES": "10",
        "BATCH_SIZE": "2",
        "USE_FLOAT16": "1",
    })

    try:
        import mvp
        print("β MVP module imported successfully")

        if hasattr(mvp, 'model_manager'):
            print("β ModelManager instance available")

            mgr = mvp.model_manager
            print(f" - Device: {mgr.device}")
            print(f" - USE_FLOAT16: {mvp.USE_FLOAT16}")
            print(f" - MAX_IMAGES: {mvp.MAX_IMAGES}")
            print(f" - BATCH_SIZE: {mvp.BATCH_SIZE}")
        else:
            print("β ModelManager not found in mvp module")

    except ImportError as exc:
        print(f"β Failed to import mvp module: {exc}")
    except Exception as exc:
        print(f"β Error during model loading test: {exc}")

    print(divider)
|
|
|
|
|
def test_cpu_mode():
    """Test CPU fallback mode.

    Temporarily hides CUDA devices via ``CUDA_VISIBLE_DEVICES``, imports the
    ``mvp`` module with the CPU-fallback flags set, and restores the original
    environment afterwards. All failures are printed, not raised.
    """
    print("\nTesting CPU Fallback Mode...")
    print("-" * 60)

    # Capture the exact prior state — may be a string (possibly "") or unset.
    original_cuda = os.environ.get("CUDA_VISIBLE_DEVICES")
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    try:
        # NOTE(review): if torch's CUDA context was already initialized earlier
        # in this process, is_available() may still report True despite the
        # env change — the variable is only consulted at CUDA init time.
        cuda_available = torch.cuda.is_available()
        print(f"β CUDA disabled: {not cuda_available}")

        # Enable the CPU fallback / dummy-model switches before importing mvp,
        # which reads them at import time.
        os.environ["ZOO3D_ALLOW_CPU"] = "1"
        os.environ["ZOO3D_CPU_DUMMY"] = "1"

        import mvp
        print("β MVP module works in CPU mode")
        print(f" - ZOO3D_ALLOW_CPU: {mvp.ZOO3D_ALLOW_CPU}")
        print(f" - ZOO3D_CPU_DUMMY: {mvp.ZOO3D_CPU_DUMMY}")

    except Exception as e:
        print(f"β CPU mode test failed: {e}")
    finally:
        # BUG FIX: the previous `if original_cuda:` truthiness check dropped a
        # pre-existing empty-string value (popping it instead of restoring it).
        # Compare against None so "" is restored faithfully.
        if original_cuda is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = original_cuda
        else:
            os.environ.pop("CUDA_VISIBLE_DEVICES", None)

    print("-" * 60)
|
|
|
|
|
def test_memory_optimization():
    """Report float16 memory savings and batch-count math.

    Skips entirely (with a message) when CUDA is unavailable; otherwise
    allocates a small test tensor, compares fp32 vs fp16 footprints, and
    prints how many batches BATCH_SIZE implies for a fixed image count.
    """
    print("\nTesting Memory Optimization...")
    rule = "-" * 60
    print(rule)

    if not torch.cuda.is_available():
        print("β Skipping GPU memory tests (CUDA not available)")
        return

    print("β Testing float16 precision:")
    fp32 = torch.randn(100, 100, device='cuda')
    fp16 = fp32.half()

    # Footprint = element count x bytes per element.
    bytes_fp32 = fp32.nelement() * fp32.element_size()
    bytes_fp16 = fp16.nelement() * fp16.element_size()

    print(f" - FP32 memory: {bytes_fp32 / 1024:.2f} KB")
    print(f" - FP16 memory: {bytes_fp16 / 1024:.2f} KB")
    print(f" - Memory saved: {(1 - bytes_fp16/bytes_fp32) * 100:.1f}%")

    print("\nβ Testing batch processing:")
    batch = int(os.environ.get("BATCH_SIZE", "2"))
    images = 10

    # Ceiling division: number of batches needed to cover all images.
    batches = -(-images // batch)
    print(f" - Total images: {images}")
    print(f" - Batch size: {batch}")
    print(f" - Number of batches: {batches}")

    # Free the test tensors and return the cached blocks to the allocator.
    del fp32, fp16
    torch.cuda.empty_cache()

    print(rule)
|
|
|
|
|
def main():
    """Run every test in the Zero GPU compatibility suite, then print usage hints."""
    print("\nπ Zoo3D Zero GPU Compatibility Test Suite\n")

    test_environment()
    test_model_loading()
    test_cpu_mode()
    test_memory_optimization()

    # BUG FIX: this message's string literal was split across two source lines
    # mid-literal (a SyntaxError); rejoined into a single print call.
    print("\nβ All tests completed!")
    print("\nTo test in different modes, try:")
    print(" - Zero GPU mode: ZERO_GPU_MODE=1 python test_zero_gpu.py")
    print(" - CPU mode: CUDA_VISIBLE_DEVICES='' python test_zero_gpu.py")
    print(" - Normal GPU: python test_zero_gpu.py")


if __name__ == "__main__":
    main()