Spaces: Running on Zero
better commenting on model loading on zeroGPU or not if cuda not available
app.py CHANGED

@@ -40,6 +40,16 @@ try:
         raise FileNotFoundError(f"Base model not found at: {BASE_MODEL_PATH}")
     if not os.path.exists(LORA_MODEL_PATH):
         raise FileNotFoundError(f"LoRA model not found at: {LORA_MODEL_PATH}")
+
+    print(f"Is CUDA available: {torch.cuda.is_available()}")
+    if torch.cuda.is_available():
+        face_processor = FaceProcessor(antelopv2_path=ANTELOPEV2_PATH)
+        pipe = load_wan_pipe(base_path=BASE_MODEL_PATH, torch_dtype=torch.bfloat16)
+        set_stand_in(pipe, model_path=LORA_MODEL_PATH)
+        print("Model loaded successfully!")
+    else:
+        print("Will load models on ZeroGPU on inference if available")
+
 
 except Exception as e:
     print(f"Model loading failed: {e}")

@@ -56,7 +66,7 @@ except Exception as e:
     demo.launch()
     exit()
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=300)
 def generate_video(
     pil_image: Image.Image,
     prompt: str,

@@ -70,17 +80,22 @@ def generate_video(
     if pil_image is None:
         raise gr.Error("Please upload a face image first!")
 
-
-
-
-
+    if is_shared_ui:
+        print("Loading models...")
+        face_processor = FaceProcessor(antelopv2_path=ANTELOPEV2_PATH)
+        pipe = load_wan_pipe(base_path=BASE_MODEL_PATH, torch_dtype=torch.bfloat16)
+        set_stand_in(pipe, model_path=LORA_MODEL_PATH)
+        print("Model loaded successfully!")
 
     print("Processing face...")
     ip_image = face_processor.process(pil_image)
     print("Face processing completed.")
 
     if is_shared_ui:
-
+        num_steps = 10
+        quality = 5
+        fps = 12
+
 
     print("Generating video...")
     start_time = time.time()

@@ -171,11 +186,11 @@ with gr.Blocks() as demo:
                 info="More steps may improve details but will take longer to generate.",
             )
             output_fps = gr.Slider(
-                label="Video FPS", minimum=10, maximum=30, step=1, value=
+                label="Video FPS", minimum=10, maximum=30, step=1, value=12,
                 interactive = False if is_shared_ui else True,
             )
             output_quality = gr.Slider(
-                label="Video Quality", minimum=1, maximum=10, step=1, value=
+                label="Video Quality", minimum=1, maximum=10, step=1, value=6,
                 interactive = False if is_shared_ui else True
             )
 
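
Taken together, the hunks implement a two-path loading scheme: when CUDA is available at import time (a local or dedicated GPU), the face processor and the Wan pipeline with the Stand-In LoRA are loaded once at startup; on the shared ZeroGPU Space, loading is deferred into the @spaces.GPU-decorated generate_video function, where a GPU is attached for up to 300 seconds per call, and shared-UI runs are pinned to 10 steps, quality 5, and 12 FPS. The sketch below condenses that pattern; the load_models helper and the "pipe is None" guard (to avoid reloading on every call) are illustrative additions, not part of the commit, and the module-level names are assumed to come from the Space's own code as they appear in the diff:

import torch
import spaces  # Hugging Face Spaces helper providing the @spaces.GPU decorator

# Assumed to be defined/imported elsewhere in the Space, as shown in the diff:
# FaceProcessor, load_wan_pipe, set_stand_in, ANTELOPEV2_PATH, BASE_MODEL_PATH,
# LORA_MODEL_PATH, is_shared_ui.

face_processor = None
pipe = None

def load_models():
    # Hypothetical helper (not in the commit): load the face processor and the
    # Wan pipeline, then apply the Stand-In LoRA.
    global face_processor, pipe
    face_processor = FaceProcessor(antelopv2_path=ANTELOPEV2_PATH)
    pipe = load_wan_pipe(base_path=BASE_MODEL_PATH, torch_dtype=torch.bfloat16)
    set_stand_in(pipe, model_path=LORA_MODEL_PATH)

print(f"Is CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    # Local/dedicated GPU: load everything once at import time.
    load_models()
else:
    # ZeroGPU: CUDA only becomes available inside @spaces.GPU calls,
    # so defer loading until the first generation request.
    print("Will load models on ZeroGPU on inference if available")

@spaces.GPU(duration=300)  # request a GPU slot for up to 300 seconds per call
def generate_video(pil_image, prompt):
    if is_shared_ui and pipe is None:
        # Guard is an illustrative addition; the commit reloads on each call.
        load_models()
    ip_image = face_processor.process(pil_image)
    if is_shared_ui:
        # Fixed settings on the shared demo to keep runs short.
        num_steps, quality, fps = 10, 5, 12
    # ... run the pipeline with pipe(...) and encode the output video ...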