"""
Game Editing – G-Buffer conditioned stylized video generation
Hugging Face Space (ZeroGPU) version.
"""

import os
import sys
import torch
import tempfile
import spaces
import gradio as gr
from PIL import Image
from huggingface_hub import hf_hub_download

# ── download models on CPU at startup ──────────────────────────────────────
HF_TOKEN = os.environ.get("HF_TOKEN", None)

print("Downloading models...")
CKPT_PATH = hf_hub_download(
    repo_id="Brian9999/game-editing",
    filename="model.safetensors",
    token=HF_TOKEN,
)
T5_PATH = hf_hub_download(
    repo_id="Wan-AI/Wan2.1-T2V-1.3B",
    filename="models_t5_umt5-xxl-enc-bf16.pth",
)
VAE_PATH = hf_hub_download(
    repo_id="Wan-AI/Wan2.1-T2V-1.3B",
    filename="Wan2.1_VAE.pth",
)
print("Models downloaded.")

# Patch: diffsynth/transformers indirectly imports torchaudio which fails on ZeroGPU.
# We create a proper mock so importlib.util.find_spec doesn't choke on __spec__=None.
import types
import importlib.machinery
_mock = types.ModuleType("torchaudio")
_mock.__spec__ = importlib.machinery.ModuleSpec("torchaudio", None)
_mock.__version__ = "0.0.0"
sys.modules["torchaudio"] = _mock

from diffsynth.utils.data import save_video, VideoData
from diffsynth.pipelines.wan_video import WanVideoPipeline, ModelConfig
from diffsynth.models.model_loader import MODEL_CONFIGS
from gbuffer_utils import expand_patch_embedding, inject_gbuffer_unit

# Register the fine-tuned G-buffer DiT config (its weight hash is unknown to the PyPI diffsynth).
# in_dim=96 = 16 (base latent) + 5*16 (G-buffer latents for albedo/depth/metallic/normal/roughness)
_GBUFFER_DIT_CONFIG = {
    "model_hash": "a87823c16fa5119ca6ef32cefc0be86d",
    "model_name": "wan_video_dit",
    "model_class": "diffsynth.models.wan_video_dit.WanModel",
    "extra_kwargs": {
        "has_image_input": False,
        "patch_size": [1, 2, 2],
        "in_dim": 96,
        "dim": 1536,
        "ffn_dim": 8960,
        "freq_dim": 256,
        "text_dim": 4096,
        "out_dim": 16,
        "num_heads": 12,
        "num_layers": 30,
        "eps": 1e-06,
    },
}
if not any(c["model_hash"] == _GBUFFER_DIT_CONFIG["model_hash"] for c in MODEL_CONFIGS):
    MODEL_CONFIGS.append(_GBUFFER_DIT_CONFIG)
    print("Registered GBuffer DiT model config.")

# ── constants ───────────────────────────────────────────────────────────────
# Default Wan2.1 negative prompt (kept in Chinese, as shipped with the model).
NEGATIVE_PROMPT = (
    "色调艳丽，过曝，静态，细节模糊不清，字幕，风格，作品，画作，画面，静止，整体发灰，"
    "最差质量，低质量，JPEG压缩残留，丑陋的，残缺的，多余的手指，画得不好的手部，"
    "画得不好的脸部，畸形的，毁容的，形态畸形的肢体，手指融合，静止不动的画面，"
    "杂乱的背景，三条腿，背景人很多，倒着走"
)

PRESET_STYLES = {
    "(custom)": "",
    "bright_sunny": "the scene is bathed in bright, warm golden sunlight under a clear blue sky, creating a serene and radiant atmosphere.",
    "snowy_winter": "the scene is set in a frozen, snow-covered environment under cold, pale winter light with falling snowflakes, creating a silent and ethereal winter wonderland atmosphere.",
    "sunset_dramatic": "the scene is bathed in dramatic sunset light under a deep orange and crimson sky, with long shadows and volumetric god rays, creating an epic and cinematic atmosphere.",
    "cyberpunk_neon": "the scene is illuminated by vibrant pink and blue neon lights with glowing holographic particles and electric haze, creating a futuristic cyberpunk atmosphere.",
    "underwater": "the scene is submerged deep underwater under soft, filtered aquamarine light, with shimmering caustics and drifting bubbles, creating a mysterious and tranquil deep-sea atmosphere.",
    "autumn_warm": "the scene is filled with vibrant autumn foliage and golden and crimson fallen leaves under warm amber afternoon light, creating a cozy and nostalgic autumn atmosphere.",
    "moonlit_night": "the scene is set under a brilliant full moon, bathed in cool silver moonlight with fireflies dancing in the darkness and stars visible overhead, creating a mystical and enchanting nighttime atmosphere.",
}


def read_video_frames(video_path, num_frames, width, height):
    """Load up to `num_frames` frames from a video and resize them with Lanczos."""
    frames = VideoData(video_path).raw_data()
    frames = [f.resize((width, height), Image.LANCZOS) for f in frames[:num_frames]]
    return frames


# ── Pre-load pipeline on CPU at startup (free, no GPU needed) ───────────────
print("Building pipeline on CPU...")
pipe = WanVideoPipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cpu",
    model_configs=[
        ModelConfig(CKPT_PATH),
        ModelConfig(T5_PATH),
        ModelConfig(VAE_PATH),
    ],
)
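# Adapt the stock 16-channel Wan DiT for G-buffer conditioning. Presumably, per the
# in_dim=96 config above: expand_patch_embedding widens the patch-embedding projection
# from 16 to 16 + 5*16 = 96 input channels, and inject_gbuffer_unit adds a pipeline
# unit that VAE-encodes the five G-buffer videos and concatenates their latents with
# the noise latents channel-wise.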
expand_patch_embedding(pipe, num_gbuffers=5)
inject_gbuffer_unit(pipe)
print("Pipeline ready on CPU.")


# ── CPU pre-processing (outside @spaces.GPU to save GPU quota) ──────────────
def prepare_inputs(
    albedo_video, depth_video, metallic_video, normal_video, roughness_video,
    prompt, style_preset, num_frames, height, width,
):
    gbuffer_paths = [albedo_video, depth_video, metallic_video, normal_video, roughness_video]
    names = ["Albedo", "Depth", "Metallic", "Normal", "Roughness"]
    for i, path in enumerate(gbuffer_paths):
        if path is None:
            raise gr.Error(f"Missing G-buffer: {names[i]}")

    # build prompt
    if style_preset and style_preset != "(custom)":
        style_text = PRESET_STYLES[style_preset]
        final_prompt = f"{prompt.rstrip('.')}; {style_text}" if prompt.strip() else style_text
    else:
        if not prompt.strip():
            raise gr.Error("Please enter a prompt or select a style preset.")
        final_prompt = prompt

    num_frames = int(num_frames)
    height = int(height)
    width = int(width)

    gbuffer_videos = [read_video_frames(p, num_frames, width, height) for p in gbuffer_paths]
    return final_prompt, gbuffer_videos, num_frames, height, width


# ── inference (ZeroGPU: GPU allocated only during this call) ────────────────
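# ZeroGPU attaches a GPU only while a @spaces.GPU-decorated function runs, and the call
# is terminated once `duration` seconds elapse; everything else in this file therefore
# stays on CPU, and weights are moved to CUDA at the top of this function.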
@spaces.GPU(duration=180)
def generate(
    albedo_video, depth_video, metallic_video, normal_video, roughness_video,
    prompt, style_preset,
    num_frames, height, width, seed, cfg_scale, num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    # CPU pre-processing (validation, video loading, prompt building)
    final_prompt, gbuffer_videos, num_frames, height, width = prepare_inputs(
        albedo_video, depth_video, metallic_video, normal_video, roughness_video,
        prompt, style_preset, num_frames, height, width,
    )
    seed = int(seed)

    # Move all model weights from CPU to GPU
    pipe.to("cuda")
    pipe.device = "cuda"

    tiled = True
    tile_size = (30, 52)
    tile_stride = (15, 26)
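    # Tile sizes are in latent units: with the Wan 2.1 VAE's 8x spatial compression, a
    # 480x832 frame yields a 60x104 latent grid, so (30, 52) tiles with a (15, 26)
    # stride decode in half-frame windows with 50% overlap, bounding peak VRAM.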

    pipe.scheduler.set_timesteps(num_inference_steps, denoising_strength=1.0, shift=5.0)
    inputs_posi = {"prompt": final_prompt}
    inputs_nega = {"negative_prompt": NEGATIVE_PROMPT}
    inputs_shared = {
        "input_image": None, "end_image": None,
        "input_video": None, "denoising_strength": 1.0,
        "control_video": None, "reference_image": None,
        "vace_video": None, "vace_video_mask": None, "vace_reference_image": None, "vace_scale": 1.0,
        "seed": seed, "rand_device": "cpu",
        "height": height, "width": width, "num_frames": num_frames,
        "cfg_scale": cfg_scale, "cfg_merge": False,
        "sigma_shift": 5.0,
        "motion_bucket_id": None, "longcat_video": None,
        "tiled": tiled, "tile_size": tile_size, "tile_stride": tile_stride,
        "sliding_window_size": None, "sliding_window_stride": None,
        "input_audio": None, "audio_sample_rate": 16000,
        "s2v_pose_video": None, "audio_embeds": None, "s2v_pose_latents": None, "motion_video": None,
        "animate_pose_video": None, "animate_face_video": None, "animate_inpaint_video": None, "animate_mask_video": None,
        "vap_video": None,
        "gbuffer_videos": gbuffer_videos,
    }

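    # Each pipeline unit (prompt encoding, latent initialization, the injected G-buffer
    # encoder, ...) pulls what it needs from these dicts and writes its outputs back,
    # e.g. populating inputs_shared["latents"] for the denoising loop below.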
    for unit in pipe.units:
        inputs_shared, inputs_posi, inputs_nega = pipe.unit_runner(unit, pipe, inputs_shared, inputs_posi, inputs_nega)

    pipe.load_models_to_device(pipe.in_iteration_models)
    models = {name: getattr(pipe, name) for name in pipe.in_iteration_models}

    from tqdm import tqdm
    with torch.no_grad():
        for progress_id, timestep in enumerate(tqdm(pipe.scheduler.timesteps, desc="Generating")):
            timestep = timestep.unsqueeze(0).to(dtype=pipe.torch_dtype, device="cuda")
            noise_pred_posi = pipe.model_fn(**models, **inputs_shared, **inputs_posi, timestep=timestep)
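            # Classifier-free guidance: extrapolate from the unconditional (negative)
            # prediction toward the conditional one. cfg_scale == 1.0 degenerates to
            # the conditional prediction alone, so the second forward pass is skipped.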
            if cfg_scale != 1.0:
                noise_pred_nega = pipe.model_fn(**models, **inputs_shared, **inputs_nega, timestep=timestep)
                noise_pred = noise_pred_nega + cfg_scale * (noise_pred_posi - noise_pred_nega)
            else:
                noise_pred = noise_pred_posi
            inputs_shared["latents"] = pipe.scheduler.step(
                noise_pred, pipe.scheduler.timesteps[progress_id], inputs_shared["latents"]
            )

    pipe.load_models_to_device(["vae"])
    with torch.no_grad():
        video = pipe.vae.decode(
            inputs_shared["latents"], device="cuda",
            tiled=tiled, tile_size=tile_size, tile_stride=tile_stride,
        )
    video = pipe.vae_output_to_video(video)

    out_path = os.path.join(tempfile.mkdtemp(), "output.mp4")
    save_video(video, out_path, fps=15, quality=5)

    return out_path


# ── UI ──────────────────────────────────────────────────────────────────────
MODALITIES = ["Albedo", "Depth", "Metallic", "Normal", "Roughness"]


def on_style_change(style):
    if style and style != "(custom)":
        return PRESET_STYLES[style]
    return ""


def update_status(*videos):
    """Return a status string showing which G-buffers have been uploaded."""
    uploaded = sum(1 for v in videos if v is not None)
    missing = [name for name, v in zip(MODALITIES, videos) if v is None]
    if uploaded == 5:
        return "**All 5 G-buffers uploaded. Ready to generate!**"
    else:
        return f"**Uploaded {uploaded}/5 G-buffers.** Missing: {', '.join(missing)}"


with gr.Blocks(title="Game Editing", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Game Editing")
    gr.Markdown(
        "> **Quick Start:** Click one of the **Examples** below to auto-fill G-buffer videos and style, then hit **Generate**.\n>\n"
        "> **Note:** The entire generation process takes approximately **180 seconds**. "
        "For better results, increase the *Inference Steps* if you have sufficient GPU quota. "
        "In our paper, we use **50 steps**."
    )
    scroll_btn = gr.Button("Jump to Examples", variant="secondary", size="sm")
    scroll_btn.click(fn=None, js="() => { document.querySelector('#examples-anchor').scrollIntoView({behavior: 'smooth'}); }")
    status = gr.Markdown("**Upload 5 G-buffer videos, pick a style, and hit Generate. (0/5 uploaded)**")

    # ── Top row: Input (left) + Output (right), videos aligned ──
    VIDEO_HEIGHT = 400

    with gr.Row(equal_height=True):
        # Left column
        with gr.Column(scale=1):
            with gr.Tabs():
                with gr.Tab("Albedo"):
                    albedo = gr.Video(label="Albedo video", sources=["upload"], height=VIDEO_HEIGHT)
                with gr.Tab("Depth"):
                    depth = gr.Video(label="Depth video", sources=["upload"], height=VIDEO_HEIGHT)
                with gr.Tab("Metallic"):
                    metallic = gr.Video(label="Metallic video", sources=["upload"], height=VIDEO_HEIGHT)
                with gr.Tab("Normal"):
                    normal = gr.Video(label="Normal video", sources=["upload"], height=VIDEO_HEIGHT)
                with gr.Tab("Roughness"):
                    roughness = gr.Video(label="Roughness video", sources=["upload"], height=VIDEO_HEIGHT)

            # Event bindings: refresh the upload status whenever any video changes
            all_videos = [albedo, depth, metallic, normal, roughness]
            for v in all_videos:
                v.change(update_status, inputs=all_videos, outputs=[status])
                v.clear(update_status, inputs=all_videos, outputs=[status])

        # ๅณไพง Column
        with gr.Column(scale=1):
            # 💡 Key trick: wrap the right side in Tabs too, forcing the two columns to align at the top
            with gr.Tabs():
                with gr.Tab("Result"): 
                    output_video = gr.Video(label="Generated Video", height=VIDEO_HEIGHT)

    # ── Bottom: Settings ──
    gr.Markdown("### Settings")
    with gr.Row():
        style_preset = gr.Dropdown(
            choices=list(PRESET_STYLES.keys()),
            value="bright_sunny",
            label="Style Preset",
            scale=1,
        )
        prompt = gr.Textbox(
            label="Prompt",
            value=PRESET_STYLES["bright_sunny"],
            lines=2,
            placeholder="Describe the scene and desired style...",
            scale=3,
        )
    with gr.Row():
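        # Wan generates 4k+1 frames (its causal VAE compresses time 4x), hence the
        # step of 4 across 17..81.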
        num_frames = gr.Slider(17, 81, value=73, step=4, label="Frames")
        seed = gr.Number(value=0, label="Seed", precision=0)
        height = gr.Dropdown([480, 720], value=480, label="Height")
        width = gr.Dropdown([832, 1280], value=832, label="Width")
        cfg_scale = gr.Slider(1.0, 10.0, value=5.0, step=0.5, label="CFG Scale")
        num_steps = gr.Slider(10, 100, value=20, step=5, label="Inference Steps")

    run_btn = gr.Button("Generate", variant="primary", size="lg")

    # ── Examples ──
    gr.Markdown("### Examples", elem_id="examples-anchor")
    gr.Examples(
        examples=[
            [
                "examples/clip_0000_albedo.mp4",
                "examples/clip_0000_depth.mp4",
                "examples/clip_0000_metallic.mp4",
                "examples/clip_0000_normal.mp4",
                "examples/clip_0000_roughness.mp4",
                "bright_sunny",
            ],
            [
                "examples/clip_0000_albedo.mp4",
                "examples/clip_0000_depth.mp4",
                "examples/clip_0000_metallic.mp4",
                "examples/clip_0000_normal.mp4",
                "examples/clip_0000_roughness.mp4",
                "cyberpunk_neon",
            ],
            [
                "examples/clip_0000_albedo.mp4",
                "examples/clip_0000_depth.mp4",
                "examples/clip_0000_metallic.mp4",
                "examples/clip_0000_normal.mp4",
                "examples/clip_0000_roughness.mp4",
                "snowy_winter",
            ],
        ],
        inputs=[
            albedo, depth, metallic, normal, roughness,
            style_preset,
        ],
        label="Click an example to load G-buffer videos and style preset",
    )

    style_preset.change(on_style_change, inputs=[style_preset], outputs=[prompt])

    run_btn.click(
        fn=generate,
        inputs=[
            albedo, depth, metallic, normal, roughness,
            prompt, style_preset,
            num_frames, height, width, seed, cfg_scale, num_steps,
        ],
        outputs=[output_video],
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)