multimodalart HF Staff commited on
Commit
c5f0c8a
·
verified ·
1 Parent(s): 340434e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -92,7 +92,7 @@ def dummy_get_gpu_memory(device=None):
92
  # Return 40GB (in bytes) to trick the config loader into
93
  # thinking we have a high-end GPU, allowing it to select
94
  # optimal inference params without triggering torch.cuda.init()
95
- return 40 * 1024 * 1024 * 1024
96
 
97
  print("🛠️ Applying ZeroGPU Monkey Patch to hyvideo.commons.get_gpu_memory...")
98
  hyvideo.commons.get_gpu_memory = dummy_get_gpu_memory
 
92
  # Return 68GB (in bytes) to trick the config loader into
93
  # thinking we have a high-end GPU, allowing it to select
94
  # optimal inference params without triggering torch.cuda.init()
95
+ return 68 * 1024 * 1024 * 1024
96
 
97
  print("🛠️ Applying ZeroGPU Monkey Patch to hyvideo.commons.get_gpu_memory...")
98
  hyvideo.commons.get_gpu_memory = dummy_get_gpu_memory