Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -88,6 +88,16 @@ except ImportError as e:
|
|
| 88 |
|
| 89 |
import gradio as gr
|
| 90 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 91 |
# --- Part 3: Model Initialization (Pre-Load) ---
|
| 92 |
|
| 93 |
# Mock args for inference configuration (required by internal logic)
|
|
|
|
| 88 |
|
| 89 |
import gradio as gr
|
| 90 |
|
| 91 |
+
def dummy_get_gpu_memory(device=None):
    """Stand-in for hyvideo's GPU-memory probe under ZeroGPU.

    Always reports 40 GiB of GPU memory, regardless of *device*, so the
    config loader believes a high-end GPU is present and selects optimal
    inference parameters without ever triggering torch.cuda.init().

    Args:
        device: Ignored; accepted only to match the original signature.

    Returns:
        int: 40 GiB expressed in bytes.
    """
    forty_gib_in_bytes = 40 * (1 << 30)
    return forty_gib_in_bytes
|
| 96 |
+
|
| 97 |
+
# Install the stub in every namespace where get_gpu_memory is bound.
# NOTE(review): patching hyvideo.pipelines.hunyuan_video_pipeline as well
# suggests that module did `from ... import get_gpu_memory` and holds its
# own reference, which a patch of hyvideo.commons alone would miss —
# confirm against the hyvideo package source.
print("🛠️ Applying ZeroGPU Monkey Patch to hyvideo.commons.get_gpu_memory...")
hyvideo.commons.get_gpu_memory = dummy_get_gpu_memory
hyvideo.pipelines.hunyuan_video_pipeline.get_gpu_memory = dummy_get_gpu_memory
|
| 100 |
+
|
| 101 |
# --- Part 3: Model Initialization (Pre-Load) ---
|
| 102 |
|
| 103 |
# Mock args for inference configuration (required by internal logic)
|