0xZohar committed on
Commit
f17371f
·
verified ·
1 Parent(s): 4885d4a

Fix: Force EngineFast for GPU T4 small environment

Browse files

- Remove CPU fallback logic (not needed on GPU tier)
- Force use of EngineFast which has t2t() method
- Resolves AttributeError: 'Engine' object has no attribute 't2t'

Requirements: HF Space must use GPU hardware (T4 small or better)

Files changed (1) hide show
  1. code/demo.py +4 -14
code/demo.py CHANGED
@@ -78,21 +78,11 @@ def get_gpt_engine_cached():
78
  shape_ckpt_path = 'model_weights/save_shape_cars_whole_p_rot_scratch_4mask_randp.safetensors'
79
  save_gpt_ckpt_path = 'model_weights/save_shape_cars_whole_p_rot_scratch_4mask_randp.safetensors'
80
 
81
- # Select device with safe CPU fallback to avoid "no NVIDIA driver" errors on basic HF instances
82
- device_override = os.getenv("GPT_DEVICE")
83
- if device_override:
84
- device = torch.device(device_override)
85
- print(f"🔧 GPT device override via GPT_DEVICE={device_override}")
86
- else:
87
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
88
-
89
- engine_cls = EngineFast if device.type == "cuda" else Engine
90
- if device.type == "cuda":
91
- print("✅ GPT engine will use CUDA (EngineFast)")
92
- else:
93
- print("ℹ️ GPU not available; using CPU Engine (slower, no CUDA graphs)")
94
 
95
- engine = engine_cls(
96
  config_path, gpt_ckpt_path, shape_ckpt_path, save_gpt_ckpt_path,
97
  device=device,
98
  mode='test'
 
78
  shape_ckpt_path = 'model_weights/save_shape_cars_whole_p_rot_scratch_4mask_randp.safetensors'
79
  save_gpt_ckpt_path = 'model_weights/save_shape_cars_whole_p_rot_scratch_4mask_randp.safetensors'
80
 
81
+ # GPU T4 small environment: Use EngineFast with CUDA (required for t2t method)
82
+ device = torch.device("cuda")
83
+ print("✅ Using EngineFast on GPU (T4 small)")
 
 
 
 
 
 
 
 
 
 
84
 
85
+ engine = EngineFast(
86
  config_path, gpt_ckpt_path, shape_ckpt_path, save_gpt_ckpt_path,
87
  device=device,
88
  mode='test'