Spaces:
Paused
Fix: Safe device selection for CPU/GPU compatibility
Browse files
- clip_retrieval.py: Add _resolve_device() with CUDA detection & CPU fallback
- demo.py: Auto-select GPT engine (EngineFast for CUDA, Engine for CPU)
- engine.py: Normalize device_map to string for CLIP text encoder
Resolves: CUDA availability issues on HF Spaces CPU instances
Support: Works on both GPU and CPU tiers
File: code/cube3d/training/engine.py
code/cube3d/training/engine.py
CHANGED
|
@@ -78,11 +78,12 @@ class Engine:
|
|
| 78 |
codebook = self.shape_model.bottleneck.block.get_codebook()
|
| 79 |
codebook = self.gpt_model.shape_proj(codebook).detach()
|
| 80 |
self.gpt_model.transformer.wte.weight.data[: codebook.shape[0]] = codebook
|
| 81 |
-
|
|
|
|
| 82 |
self.text_model = CLIPTextModelWithProjection.from_pretrained(
|
| 83 |
self.cfg.text_model_pretrained_model_name_or_path,
|
| 84 |
force_download=False,
|
| 85 |
-
device_map=
|
| 86 |
cache_dir=HF_CACHE_DIR,
|
| 87 |
).eval()
|
| 88 |
print("------text_model device---------", self.text_model.device)
|
|
|
|
| 78 |
codebook = self.shape_model.bottleneck.block.get_codebook()
|
| 79 |
codebook = self.gpt_model.shape_proj(codebook).detach()
|
| 80 |
self.gpt_model.transformer.wte.weight.data[: codebook.shape[0]] = codebook
|
| 81 |
+
|
| 82 |
+
device_map = self.device.type if isinstance(self.device, torch.device) else self.device
|
| 83 |
self.text_model = CLIPTextModelWithProjection.from_pretrained(
|
| 84 |
self.cfg.text_model_pretrained_model_name_or_path,
|
| 85 |
force_download=False,
|
| 86 |
+
device_map=device_map,
|
| 87 |
cache_dir=HF_CACHE_DIR,
|
| 88 |
).eval()
|
| 89 |
print("------text_model device---------", self.text_model.device)
|