Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -164,27 +164,24 @@ class ArgsNamespace:
 
 initialize_infer_state(ArgsNamespace())
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    import traceback
-    traceback.print_exc()
-    sys.exit(1)
+print(f"⏳ Initializing Pipeline ({TRANSFORMER_VERSION})...")
+try:
+    # Load to CPU explicitly
+    pipe = HunyuanVideo_1_5_Pipeline.create_pipeline(
+        pretrained_model_name_or_path=MODEL_DIR,
+        transformer_version=TRANSFORMER_VERSION,
+        enable_offloading=ENABLE_OFFLOADING,
+        enable_group_offloading=ENABLE_OFFLOADING,
+        transformer_dtype=DTYPE,
+        device=torch.device('cpu')
+    )
+    print("✅ Model loaded into CPU RAM.")
+except Exception as e:
+    print(f"❌ Failed to load model: {e}")
+    import traceback
+    traceback.print_exc()
+    sys.exit(1)
+pipe.to("cuda")
 
 def save_video_tensor(video_tensor, path, fps=24):
     if isinstance(video_tensor, list): video_tensor = video_tensor[0]

@@ -212,14 +209,8 @@ def generate(input_image, prompt, length, steps, shift, seed, guidance):
     print(f"🚀 Moving Pipeline to GPU... (Prompt: {prompt})")
 
     try:
-        # 1. Move Weights
-        pipe.to("cuda")
-
-        # 2. FIX: Manually update internal device reference
-        # (Hunyuan uses this attribute instead of .device in some places)
         pipe.execution_device = torch.device("cuda")
 
-        # 3. Run Inference
         output = pipe(
             prompt=prompt,
             height=480, width=854, aspect_ratio="16:9",

@@ -277,6 +268,5 @@ def create_ui():
     return demo
 
 if __name__ == "__main__":
-    pre_load_model()
     ui = create_ui()
     ui.queue().launch(server_name="0.0.0.0", share=True)
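Read as a whole, the new startup path builds the pipeline in CPU RAM at import time and only then moves the weights to the GPU, so a failed load exits cleanly before any GPU state exists. The sketch below just pieces the added lines together with plausible surrounding context: HunyuanVideo_1_5_Pipeline.create_pipeline and its arguments are taken from the diff, while the placeholder values for MODEL_DIR, TRANSFORMER_VERSION, ENABLE_OFFLOADING, and DTYPE, and the omitted pipeline import, are assumptions about parts of app.py this commit does not touch.

import sys
import torch
# Assumption: HunyuanVideo_1_5_Pipeline is imported elsewhere in app.py;
# its import path is not shown in this diff, so it is omitted here.

MODEL_DIR = "./ckpts"              # assumption: placeholder, real value defined earlier in app.py
TRANSFORMER_VERSION = "720p"       # assumption: placeholder
ENABLE_OFFLOADING = False          # assumption: offloading disabled so the whole model sits in RAM
DTYPE = torch.bfloat16             # assumption: transformer dtype

print(f"⏳ Initializing Pipeline ({TRANSFORMER_VERSION})...")
try:
    # Build the pipeline on the CPU first; a load failure exits before touching the GPU.
    pipe = HunyuanVideo_1_5_Pipeline.create_pipeline(
        pretrained_model_name_or_path=MODEL_DIR,
        transformer_version=TRANSFORMER_VERSION,
        enable_offloading=ENABLE_OFFLOADING,
        enable_group_offloading=ENABLE_OFFLOADING,
        transformer_dtype=DTYPE,
        device=torch.device("cpu"),
    )
    print("✅ Model loaded into CPU RAM.")
except Exception as e:
    print(f"❌ Failed to load model: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)

pipe.to("cuda")  # move the weights once at import time; generate() no longer does this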
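On the inference side the commit keeps only the execution_device assignment, since the weights already live on the GPU. Below is a minimal sketch of how that surviving line sits inside generate(); the spaces.GPU decorator is an assumption based on this Space running on ZeroGPU and is not visible in the diff, the except branch is a placeholder, and the pipe(...) arguments beyond those shown as context are omitted.

import spaces   # assumption: Hugging Face ZeroGPU helper used by this Space
import torch

@spaces.GPU
def generate(input_image, prompt, length, steps, shift, seed, guidance):
    # 'pipe' is the module-level pipeline created at import time (see the startup sketch above).
    print(f"🚀 Moving Pipeline to GPU... (Prompt: {prompt})")

    try:
        # Weights were moved to CUDA at import time; only the pipeline's cached
        # device reference still needs to point at the GPU before inference.
        pipe.execution_device = torch.device("cuda")

        output = pipe(
            prompt=prompt,
            height=480, width=854, aspect_ratio="16:9",
            # ...remaining arguments unchanged and not shown in this diff
        )
    except Exception:
        raise  # placeholder: the real error handling lies outside this hunk
    return output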