Got it! Here's a cleaned-up version of your script with the extra LoRA wired **only into the low-noise stage (`transformer_2`)** of Wan 2.2. I've kept everything else intact and added the adapter under a clear name so you can tweak its weight later if you want.
```python
# PyTorch 2.8 (temporary hack)
import os
os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')
# Actual demo code
import spaces
import torch
from diffusers import WanPipeline, AutoencoderKLWan
from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
from diffusers.utils.export_utils import export_to_video
import gradio as gr
import tempfile
import numpy as np
from PIL import Image
import random
import gc
from optimization import optimize_pipeline_
MODEL_ID = "Wan-AI/Wan2.2-T2V-A14B-Diffusers"
LANDSCAPE_WIDTH = 832
LANDSCAPE_HEIGHT = 480
MAX_SEED = np.iinfo(np.int32).max
FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81
MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
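# At FIXED_FPS = 16 these work out to MIN_DURATION = 8 / 16 = 0.5 s and
# MAX_DURATION = round(81 / 16, 1) = 5.1 s, which bound the duration slider below.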
# Build the pipeline (bf16 on GPU)
vae = AutoencoderKLWan.from_pretrained(
    MODEL_ID, subfolder="vae", torch_dtype=torch.float32
)
pipe = WanPipeline.from_pretrained(
    MODEL_ID,
    transformer=WanTransformer3DModel.from_pretrained(
        "linoyts/Wan2.2-T2V-A14B-Diffusers-BF16",
        subfolder="transformer",
        torch_dtype=torch.bfloat16,
        device_map="cuda",
    ),
    transformer_2=WanTransformer3DModel.from_pretrained(
        "linoyts/Wan2.2-T2V-A14B-Diffusers-BF16",
        subfolder="transformer_2",
        torch_dtype=torch.bfloat16,
        device_map="cuda",
    ),
    vae=vae,
    torch_dtype=torch.bfloat16,
).to("cuda")
# ---- NEW: Load Orbit-Shot LoRA on the LOW-NOISE stage only (transformer_2) ----
# Repo: ostris/wan22_i2v_14b_orbit_shot_lora
# File: wan22_14b_i2v_orbit_low_noise.safetensors
#
# Notes:
# - We attach this LoRA only to transformer_2 (the low-noise stage for Wan 2.2)
#   via `load_into_transformer_2=True`, which requires a recent diffusers release.
# - The adapter name `orbit_low` lets you adjust or disable it later via set_adapters.
pipe.load_lora_weights(
    "ostris/wan22_i2v_14b_orbit_shot_lora",
    weight_name="wan22_14b_i2v_orbit_low_noise.safetensors",
    adapter_name="orbit_low",
    load_into_transformer_2=True,  # IMPORTANT: apply only to the low-noise stage
)
# Activate the adapter with weight 1.0 (changeable)
pipe.set_adapters(["orbit_low"], adapter_weights=[1.0])
# -------------------------------------------------------------------------------
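# Optional sanity check (diffusers' LoraBaseMixin API): list which components
# carry which adapters; expected here is {"transformer_2": ["orbit_low"]}.
# print(pipe.get_list_adapters())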
# Usual CUDA cleanup
for _ in range(3):
    gc.collect()
    torch.cuda.synchronize()
    torch.cuda.empty_cache()
# Keep your current optimization hook
optimize_pipeline_(
    pipe,
    prompt="prompt",
    height=LANDSCAPE_HEIGHT,
    width=LANDSCAPE_WIDTH,
    num_frames=MAX_FRAMES_MODEL,
)
default_prompt_t2v = "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage."
# Standard Wan 2.2 Chinese negative prompt (overexposure, blurry detail, subtitles,
# low quality, JPEG artifacts, deformed anatomy, extra limbs, cluttered background, etc.)
default_negative_prompt = "色调艳丽，过曝，静态，细节模糊不清，字幕，风格，作品，画作，画面，静止，整体发灰，最差质量，低质量，JPEG压缩残留，丑陋的，残缺的，多余的手指，画得不好的手部，画得不好的脸部，畸形的，毁容的，形态畸形的肢体，手指融合，静止不动的画面，杂乱的背景，三条腿，背景人很多，倒着走"
def get_duration(
    prompt,
    negative_prompt,
    duration_seconds,
    guidance_scale,
    guidance_scale_2,
    steps,
    seed,
    randomize_seed,
    progress,
):
    # ZeroGPU time budget: roughly 15 seconds of GPU time per inference step.
    return steps * 15
@spaces.GPU(duration=get_duration)
def generate_video(
    prompt,
    negative_prompt=default_negative_prompt,
    duration_seconds=MAX_DURATION,
    guidance_scale=1,
    guidance_scale_2=3,
    steps=4,
    seed=42,
    randomize_seed=False,
    progress=gr.Progress(track_tqdm=True),
):
    """
    Generate a video from a text prompt using the Wan 2.2 14B T2V model with a
    low-noise Orbit-Shot LoRA adapter.
    """
    # Convert the requested duration to frames and clamp to the model's range.
    num_frames = np.clip(
        int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL
    )
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    output_frames_list = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=LANDSCAPE_HEIGHT,
        width=LANDSCAPE_WIDTH,
        num_frames=num_frames,
        guidance_scale=float(guidance_scale),      # high-noise stage
        guidance_scale_2=float(guidance_scale_2),  # low-noise stage
        num_inference_steps=int(steps),
        generator=torch.Generator(device="cuda").manual_seed(current_seed),
    ).frames[0]
    # NamedTemporaryFile only reserves a path; export_to_video writes the file.
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
        video_path = tmpfile.name
    export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
    return video_path, current_seed
with gr.Blocks() as demo:
    gr.Markdown("# Fast 4 steps Wan 2.2 T2V (14B) + Orbit-Shot LoRA (low-noise)")
    gr.Markdown(
        "Runs Wan 2.2 in 4-8 steps. The low-noise **Orbit-Shot** LoRA is applied to `transformer_2` only."
    )
    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_t2v)
            duration_seconds_input = gr.Slider(
                minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=MAX_DURATION,
                label="Duration (seconds)",
                info=f"Clamped to the model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS} fps.",
            )
            with gr.Accordion("Advanced Settings", open=False):
                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
                steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=4, label="Inference Steps")
                guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1, label="Guidance Scale - high-noise stage")
                guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=3, label="Guidance Scale 2 - low-noise stage")
            generate_button = gr.Button("Generate Video", variant="primary")
        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)

    ui_inputs = [
        prompt_input,
        negative_prompt_input,
        duration_seconds_input,
        guidance_scale_input,
        guidance_scale_2_input,
        steps_slider,
        seed_input,
        randomize_seed_checkbox,
    ]
    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])

    gr.Examples(
        examples=[
            [
                "POV selfie video, white cat with sunglasses standing on surfboard, relaxed smile, tropical beach behind (clear water, green hills, blue sky with clouds). Surfboard tips, cat falls into ocean, camera plunges underwater with bubbles and sunlight beams. Brief underwater view of cat's face, then cat resurfaces, still filming selfie, playful summer vacation mood.",
            ],
            [
                "Two anthropomorphic cats in comfy boxing gear and bright gloves fight intensely on a spotlighted stage.",
            ],
            [
                "A cinematic shot of a boat sailing on a calm sea at sunset.",
            ],
            [
                "Drone footage flying over a futuristic city with flying cars.",
            ],
        ],
        inputs=[prompt_input],
        outputs=[video_output, seed_input],
        fn=generate_video,
        cache_examples="lazy",
    )

if __name__ == "__main__":
    demo.queue().launch(mcp_server=True)
```
### Notes
* The key lines are `pipe.load_lora_weights(..., load_into_transformer_2=True)` and `pipe.set_adapters(["orbit_low"], ...)`, which ensure the **orbit-shot LoRA only affects the low-noise stage**.
* If you want to dial the LoRA's strength, change `adapter_weights=[1.0]` to something in the 0.6-1.2 range (see the sketch below).
* If you also plan to stack a "Lightning" LoRA, load it under different `adapter_name`s (one copy per stage, using `load_into_transformer_2=True` for the low-noise copy), then call a single combined `set_adapters` listing every active adapter; see the sketch below.
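For reference, here's a minimal sketch of both knobs. The Lightning repo id and weight file names below are placeholders, not a real checkpoint; substitute whichever Lightning LoRA you actually use:

```python
# Dial the orbit LoRA down (or up) on the low-noise stage.
pipe.set_adapters(["orbit_low"], adapter_weights=[0.8])

# Stacking sketch: load a Lightning LoRA into BOTH stages under separate
# adapter names, then activate everything with per-adapter weights.
pipe.load_lora_weights(
    "your-org/wan22-lightning-lora",                 # placeholder repo id
    weight_name="lightning_high_noise.safetensors",  # placeholder file name
    adapter_name="lightning_high",
)
pipe.load_lora_weights(
    "your-org/wan22-lightning-lora",                 # placeholder repo id
    weight_name="lightning_low_noise.safetensors",   # placeholder file name
    adapter_name="lightning_low",
    load_into_transformer_2=True,  # route this copy into the low-noise stage
)
pipe.set_adapters(
    ["lightning_high", "lightning_low", "orbit_low"],
    adapter_weights=[1.0, 1.0, 1.0],
)
```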