Fix model_manager tests for centralized hardware detection
Update tests to patch IS_T4_GPU and GPU_NAME in mosaic.model_manager
instead of mosaic.hardware, since that's where they're imported and used.
Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
tests/test_model_manager.py
CHANGED
|
@@ -129,6 +129,8 @@ class TestLoadAllModels:
|
|
| 129 |
assert cache.device == torch.device("cpu")
|
| 130 |
assert cache.aggressive_memory_mgmt is False
|
| 131 |
|
|
|
|
|
|
|
| 132 |
@patch("torch.cuda.is_available", return_value=True)
|
| 133 |
@patch("torch.cuda.get_device_name", return_value="NVIDIA A100")
|
| 134 |
@patch("torch.cuda.memory_allocated", return_value=0)
|
|
@@ -156,6 +158,8 @@ class TestLoadAllModels:
|
|
| 156 |
assert cache.is_t4_gpu is False
|
| 157 |
assert cache.aggressive_memory_mgmt is False # A100 should use caching
|
| 158 |
|
|
|
|
|
|
|
| 159 |
@patch("torch.cuda.is_available", return_value=True)
|
| 160 |
@patch("torch.cuda.get_device_name", return_value="Tesla T4")
|
| 161 |
@patch("torch.cuda.memory_allocated", return_value=0)
|
|
@@ -199,6 +203,8 @@ class TestLoadAllModels:
|
|
| 199 |
with patch("pickle.load"):
|
| 200 |
load_all_models(use_gpu=False)
|
| 201 |
|
|
|
|
|
|
|
| 202 |
@patch("torch.cuda.is_available", return_value=True)
|
| 203 |
@patch("torch.cuda.memory_allocated", return_value=0)
|
| 204 |
@patch("torch.cuda.get_device_properties")
|
|
|
|
| 129 |
assert cache.device == torch.device("cpu")
|
| 130 |
assert cache.aggressive_memory_mgmt is False
|
| 131 |
|
| 132 |
+
@patch("mosaic.model_manager.IS_T4_GPU", False)
|
| 133 |
+
@patch("mosaic.model_manager.GPU_NAME", "NVIDIA A100")
|
| 134 |
@patch("torch.cuda.is_available", return_value=True)
|
| 135 |
@patch("torch.cuda.get_device_name", return_value="NVIDIA A100")
|
| 136 |
@patch("torch.cuda.memory_allocated", return_value=0)
|
|
|
|
| 158 |
assert cache.is_t4_gpu is False
|
| 159 |
assert cache.aggressive_memory_mgmt is False # A100 should use caching
|
| 160 |
|
| 161 |
+
@patch("mosaic.model_manager.IS_T4_GPU", True)
|
| 162 |
+
@patch("mosaic.model_manager.GPU_NAME", "Tesla T4")
|
| 163 |
@patch("torch.cuda.is_available", return_value=True)
|
| 164 |
@patch("torch.cuda.get_device_name", return_value="Tesla T4")
|
| 165 |
@patch("torch.cuda.memory_allocated", return_value=0)
|
|
|
|
| 203 |
with patch("pickle.load"):
|
| 204 |
load_all_models(use_gpu=False)
|
| 205 |
|
| 206 |
+
@patch("mosaic.model_manager.IS_T4_GPU", False)
|
| 207 |
+
@patch("mosaic.model_manager.GPU_NAME", "NVIDIA A100")
|
| 208 |
@patch("torch.cuda.is_available", return_value=True)
|
| 209 |
@patch("torch.cuda.memory_allocated", return_value=0)
|
| 210 |
@patch("torch.cuda.get_device_properties")
|