Update app.py

app.py CHANGED
@@ -1,5 +1,3 @@
-# third
-
 """
 3D Camera View Generator
 - Qwen Image Edit + Lightning LoRA + Multi-Angle LoRA
@@ -34,41 +32,36 @@ MAX_SEED = np.iinfo(np.int32).max
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# ── Model
+# ── Model Loading ──────────────────────────────────────────────────────────────
 #
 # HOW MODEL LOADING WORKS ON ZEROGPU
 # ────────────────────────────────────
-# •
-#
-#
-#
-#
-#
-#
-#
+# • Model is loaded at module-level (app startup), NOT lazily on first request.
+#   This means weights are deserialised from disk and resident in CPU memory
+#   before any user ever hits the space.
+# • When @spaces.GPU fires, ZeroGPU pins those already-resident CPU tensors
+#   to CUDA: a fast host→device transfer, not a disk read.
+# • Lazy loading (inside @spaces.GPU) would incur disk I/O + deserialisation
+#   on the first call, adding several extra seconds to the cold start.
+# • Between calls the GPU is released by ZeroGPU, but the weights stay in
+#   CPU memory so the next call only pays the H2D transfer cost.
 #
-pipe =
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    "fal/Qwen-Image-Edit-2511-Multiple-Angles-LoRA",
-    weight_name="qwen-image-edit-2511-multiple-angles-lora.safetensors",
-    adapter_name="angles",
-)
-pipe.set_adapters(["lightning", "angles"], adapter_weights=[1.0, 1.0])
-return pipe
+pipe = QwenImageEditPlusPipeline.from_pretrained(
+    "Qwen/Qwen-Image-Edit-2511",
+    torch_dtype=dtype,
+).to(device)
+
+pipe.load_lora_weights(
+    "lightx2v/Qwen-Image-Edit-2511-Lightning",
+    weight_name="Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors",
+    adapter_name="lightning",
+)
+pipe.load_lora_weights(
+    "fal/Qwen-Image-Edit-2511-Multiple-Angles-LoRA",
+    weight_name="qwen-image-edit-2511-multiple-angles-lora.safetensors",
+    adapter_name="angles",
+)
+pipe.set_adapters(["lightning", "angles"], adapter_weights=[1.0, 1.0])
 
 
 # ── Camera parameter tables ────────────────────────────────────────────────────
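Note on the hunk above: the comment block describes the standard ZeroGPU idiom of doing all disk I/O and deserialisation eagerly at import time and decorating only the inference entry point. A minimal sketch of that shape, assuming a generic diffusers pipeline (the model id and function name below are placeholders, not this app's code):

import spaces
import torch
from diffusers import DiffusionPipeline

# Eager, module-level load: runs once at startup, so the weights are
# deserialised into CPU RAM before any request arrives.
pipe = DiffusionPipeline.from_pretrained(
    "org/placeholder-model",   # placeholder id, not the app's model
    torch_dtype=torch.bfloat16,
).to("cuda")                   # under ZeroGPU the real device move is deferred

@spaces.GPU                    # a GPU is attached only while this runs
def generate(prompt: str):
    # Only the host→device transfer happens here, never a disk read.
    return pipe(prompt).images[0]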
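Because both LoRAs are registered under named adapters, their mix can also be rebalanced at runtime without reloading anything; the weights below, other than the shipped 1.0/1.0, are hypothetical:

# Soften the speed LoRA while keeping full multi-angle strength.
pipe.set_adapters(["lightning", "angles"], adapter_weights=[0.8, 1.0])
# Or drop the Lightning adapter to trade latency for fidelity
# (this would then call for more steps and real guidance).
pipe.set_adapters(["angles"], adapter_weights=[1.0])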
@@ -134,9 +127,7 @@ def infer_camera_edit(
 
     generator = torch.Generator(device=device).manual_seed(seed)
     prompt = build_camera_prompt(azimuth, elevation, distance)
-
-
-    result = model(
+    result = pipe(
         image=image,
         prompt=prompt,
         height=height,
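With the loader function gone, the call site now references the module-level pipe directly. The seeded, device-matched Generator in the context lines is what makes an edit reproducible; a self-contained illustration of that property (illustrative names, not app code):

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

# Identically seeded generators produce identical noise, so the same seed
# plus the same inputs reproduces an edit exactly, while a fresh seed
# yields a variation. Hence the seed slider plus "Randomise seed" checkbox.
gen_a = torch.Generator(device=device).manual_seed(42)
gen_b = torch.Generator(device=device).manual_seed(42)
assert torch.equal(
    torch.randn(4, generator=gen_a, device=device),
    torch.randn(4, generator=gen_b, device=device),
)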
@@ -513,7 +504,7 @@ def create_app():
         with gr.Accordion("⚙ Generation Settings", open=False):
             seed_slider = gr.Slider(0, MAX_SEED, value=42, step=1, label="Seed")
             rand_seed_cb = gr.Checkbox(True, label="Randomise seed each generation")
-            guidance_sl = gr.Slider(1.0, 20.0, value=
+            guidance_sl = gr.Slider(1.0, 20.0, value=1.0, step=0.1, label="Guidance Scale (keep ≤1 for Lightning LoRA)")
             steps_sl = gr.Slider(1, 50, value=4, step=1, label="Inference Steps")
             width_sl = gr.Slider(256, 1024, value=1024, step=32, label="Width (px)")
             height_sl = gr.Slider(256, 1024, value=1024, step=32, label="Height (px)")
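The new default and label encode how step-distilled Lightning LoRAs are meant to run: very few steps with classifier-free guidance effectively off. A hedged sketch of a matching call, reusing the pipe/image/prompt names from the hunks above; guidance_scale is the usual diffusers kwarg, and the exact parameter this app forwards is not visible in this hunk:

# Matches the UI defaults above: 4 steps, guidance 1.0. At 1.0 the
# negative branch of classifier-free guidance contributes nothing, which
# is what the 4-step distilled LoRA expects; pushing guidance higher on
# few-step sampling tends to oversaturate rather than improve the output.
result = pipe(
    image=image,
    prompt=prompt,
    num_inference_steps=4,
    guidance_scale=1.0,   # keep ≤ 1 per the slider label
    generator=torch.Generator(device=device).manual_seed(42),
)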