linoyts (HF Staff) committed
Commit df3eb67 · verified · 1 parent: e938209

add 3d component (#45)

- add 3d component (297bf429291a4e03d06b9e3d3b69c023b2d8b744)
- Update app.py (2a864409db36ee1a6ac50c5683f9a7b888647209)
- Update app.py (dde57b5ae3847fcef8ca889adadb7ee871db70dc)
- Update app.py (c1c39c14964e7ab74531d27bdaec6003f4b21380)

Files changed (1)
1. app.py +648 -392
app.py CHANGED
@@ -3,21 +3,16 @@ import numpy as np
 import random
 import torch
 import spaces
+import base64
+from io import BytesIO
 
 from PIL import Image
 from diffusers import FlowMatchEulerDiscreteScheduler
-# from optimization import optimize_pipeline_
 from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
-# from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
+#from diffusers import QwenImageEditPlusPipeline, QwenImageTransformer2DModel
 
-import math
-from huggingface_hub import hf_hub_download
-from safetensors.torch import load_file
-
-from PIL import Image
 import os
-import gradio as gr
 from gradio_client import Client, handle_file
 import tempfile
 from typing import Optional, Tuple, Any
@@ -50,15 +45,6 @@ pipe.unload_lora_weights()
 
 spaces.aoti_blocks_load(pipe.transformer, "zerogpu-aoti/Qwen-Image", variant="fa3")
 
-#pipe.transformer.__class__ = QwenImageTransformer2DModel
-#pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
-
-#optimize_pipeline_(
-#    pipe,
-#    image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))],
-#    prompt="prompt"
-#)
-
 MAX_SEED = np.iinfo(np.int32).max
 
 
@@ -68,29 +54,7 @@ def _generate_video_segment(
     prompt: str,
     request: gr.Request
 ) -> str:
-    """
-    Generate a single video segment between two frames by calling an external
-    Wan 2.2 image-to-video service hosted on Hugging Face Spaces.
-
-    This helper function is used internally when the user asks to create
-    a video between the input and output images.
-
-    Args:
-        input_image_path (str):
-            Path to the starting frame image on disk.
-        output_image_path (str):
-            Path to the ending frame image on disk.
-        prompt (str):
-            Text prompt describing the camera movement / transition.
-        request (gr.Request):
-            Gradio request object, used here to forward the `x-ip-token`
-            header to the downstream Space for authentication/rate limiting.
-
-    Returns:
-        str:
-            A string returned by the external service, usually a URL or path
-            to the generated video.
-    """
+    """Generate a single video segment between two frames."""
     x_ip_token = request.headers['x-ip-token']
     video_client = Client(
         "multimodalart/wan-2-2-first-last-frame",
@@ -111,34 +75,9 @@ def build_camera_prompt(
     vertical_tilt: float = 0.0,
     wideangle: bool = False
 ) -> str:
-    """
-    Build a camera movement prompt based on the chosen controls.
-
-    This converts the provided control values into a prompt instruction with the corresponding trigger words for the multiple-angles LoRA.
-
-    Args:
-        rotate_deg (float, optional):
-            Horizontal rotation in degrees. Positive values rotate left,
-            negative values rotate right. Defaults to 0.0.
-        move_forward (float, optional):
-            Forward movement / zoom factor. Larger values imply moving the
-            camera closer or into a close-up. Defaults to 0.0.
-        vertical_tilt (float, optional):
-            Vertical angle of the camera:
-            - Negative ≈ bird's-eye view
-            - Positive ≈ worm's-eye view
-            Defaults to 0.0.
-        wideangle (bool, optional):
-            Whether to switch to a wide-angle lens style. Defaults to False.
-
-    Returns:
-        str:
-            A text prompt describing the camera motion. If no controls are
-            active, returns `"no camera movement"`.
-    """
+    """Build a camera movement prompt based on the chosen controls."""
     prompt_parts = []
 
-    # Rotation
     if rotate_deg != 0:
         direction = "left" if rotate_deg > 0 else "right"
         if direction == "left":
@@ -150,21 +89,18 @@ def build_camera_prompt(
                 f"将镜头向右旋转{abs(rotate_deg)}度 Rotate the camera {abs(rotate_deg)} degrees to the right."
             )
 
-    # Move forward / close-up
     if move_forward > 5:
         prompt_parts.append("将镜头转为特写镜头 Turn the camera to a close-up.")
     elif move_forward >= 1:
         prompt_parts.append("将镜头向前移动 Move the camera forward.")
 
-    # Vertical tilt
     if vertical_tilt <= -1:
         prompt_parts.append("将相机转向鸟瞰视角 Turn the camera to a bird's-eye view.")
     elif vertical_tilt >= 1:
         prompt_parts.append("将相机切换到仰视视角 Turn the camera to a worm's-eye view.")
 
-    # Lens option
     if wideangle:
-        prompt_parts.append(" 将镜头转为广角镜头 Turn the camera to a wide-angle lens.")
+        prompt_parts.append("将镜头转为广角镜头 Turn the camera to a wide-angle lens.")
 
     final_prompt = " ".join(prompt_parts).strip()
     return final_prompt if final_prompt else "no camera movement"
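
Reviewer note: build_camera_prompt just concatenates the LoRA trigger phrases, so combined controls yield a concatenated prompt. A quick illustrative check against the branches visible in this hunk (not part of the commit):

    build_camera_prompt(-45, 0, 0, False)
    # -> "将镜头向右旋转45度 Rotate the camera 45 degrees to the right."
    build_camera_prompt(0, 10, -1, False)
    # -> "将镜头转为特写镜头 Turn the camera to a close-up. 将相机转向鸟瞰视角 Turn the camera to a bird's-eye view."
    build_camera_prompt(0, 0, 0, False)
    # -> "no camera movement"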
@@ -185,55 +121,7 @@ def infer_camera_edit(
     width: Optional[int] = None,
     prev_output: Optional[Image.Image] = None,
 ) -> Tuple[Image.Image, int, str]:
-    """
-    Edit the camera angles/view of an image with Qwen Image Edit 2509 and dx8152's Qwen-Edit-2509-Multiple-angles LoRA.
-
-    Applies a camera-style transformation (rotation, zoom, tilt, lens)
-    to an input image.
-
-    Args:
-        image (PIL.Image.Image | None, optional):
-            Input image to edit. If `None`, the function will instead try to
-            use `prev_output`. At least one of `image` or `prev_output` must
-            be available. Defaults to None.
-        rotate_deg (float, optional):
-            Horizontal rotation in degrees (-90, -45, 0, 45, 90). Positive values rotate
-            to the left, negative to the right. Defaults to 0.0.
-        move_forward (float, optional):
-            Forward movement / zoom factor (0, 5, 10). Higher values move the
-            camera closer; values >5 switch to a close-up style. Defaults to 0.0.
-        vertical_tilt (float, optional):
-            Vertical tilt (-1 to 1). -1 ≈ bird's-eye view, +1 ≈ worm's-eye view.
-            Defaults to 0.0.
-        wideangle (bool, optional):
-            Whether to use a wide-angle lens style. Defaults to False.
-        seed (int, optional):
-            Random seed for the generation. Ignored if `randomize_seed=True`.
-            Defaults to 0.
-        randomize_seed (bool, optional):
-            If True, a random seed (0..MAX_SEED) is chosen per call.
-            Defaults to True.
-        true_guidance_scale (float, optional):
-            CFG / guidance scale controlling prompt adherence.
-            Defaults to 1.0 since the demo is using a distilled transformer for faster inference.
-        num_inference_steps (int, optional):
-            Number of inference steps. Defaults to 4.
-        height (int, optional):
-            Output image height. Must typically be a multiple of 8.
-            If set to 0, the model will infer a size. Defaults to 1024 if none is provided.
-        width (int, optional):
-            Output image width. Must typically be a multiple of 8.
-            If set to 0, the model will infer a size. Defaults to 1024 if none is provided.
-        prev_output (PIL.Image.Image | None, optional):
-            Previous output image to use as input when no new image is uploaded.
-            Defaults to None.
-
-    Returns:
-        Tuple[PIL.Image.Image, int, str]:
-            - The edited output image.
-            - The actual seed used for generation.
-            - The constructed camera prompt string.
-    """
+    """Edit the camera angles/view of an image with Qwen Image Edit 2509."""
     progress = gr.Progress(track_tqdm=True)
 
     prompt = build_camera_prompt(rotate_deg, move_forward, vertical_tilt, wideangle)
@@ -243,7 +131,6 @@ def infer_camera_edit(
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device=device).manual_seed(seed)
 
-    # Choose input image (prefer uploaded, else last output)
     pil_images = []
     if image is not None:
         if isinstance(image, Image.Image):
@@ -279,29 +166,7 @@ def create_video_between_images(
     prompt: str,
     request: gr.Request
 ) -> str:
-    """
-    Create a short transition video between the input and output images via the
-    Wan 2.2 first-last-frame Space.
-
-    Args:
-        input_image (PIL.Image.Image | None):
-            Starting frame image (the original / previous view).
-        output_image (numpy.ndarray | None):
-            Ending frame image - the output image with the the edited camera angles.
-        prompt (str):
-            The camera movement prompt used to describe the transition.
-        request (gr.Request):
-            Gradio request object, used to forward the `x-ip-token` header
-            to the video generation app.
-
-    Returns:
-        str:
-            a path pointing to the generated video.
-
-    Raises:
-        gr.Error:
-            If either image is missing or if the video generation fails.
-    """
+    """Create a short transition video between the input and output images."""
     if input_image is None or output_image is None:
         raise gr.Error("Both input and output images are required to create a video.")
 
@@ -326,60 +191,499 @@
         raise gr.Error(f"Video generation failed: {e}")
 
 
-# --- UI ---
-css = '''#col-container { max-width: 800px; margin: 0 auto; }
-.dark .progress-text{color: white !important}
-#examples{max-width: 800px; margin: 0 auto; }'''
+# --- 3D Camera Control Component for 2509 ---
+# Using gr.HTML directly with templates (Gradio 6 style)
+
+CAMERA_3D_HTML_TEMPLATE = """
+<div id="camera-control-wrapper" style="width: 100%; height: 400px; position: relative; background: #1a1a1a; border-radius: 12px; overflow: hidden;">
+  <div id="prompt-overlay" style="position: absolute; bottom: 10px; left: 50%; transform: translateX(-50%); background: rgba(0,0,0,0.8); padding: 8px 16px; border-radius: 8px; font-family: monospace; font-size: 11px; color: #00ff88; white-space: nowrap; z-index: 10; max-width: 90%; overflow: hidden; text-overflow: ellipsis;"></div>
+  <div id="control-legend" style="position: absolute; top: 10px; left: 10px; background: rgba(0,0,0,0.7); padding: 8px 12px; border-radius: 8px; font-family: system-ui; font-size: 11px; color: #fff; z-index: 10;">
+    <div style="margin-bottom: 4px;"><span style="color: #00ff88;">●</span> Rotation (↔)</div>
+    <div style="margin-bottom: 4px;"><span style="color: #ff69b4;">●</span> Vertical Tilt (↕)</div>
+    <div><span style="color: #ffa500;">●</span> Distance/Zoom</div>
+  </div>
+</div>
+"""
+
+CAMERA_3D_JS = """
+(() => {
+  const wrapper = element.querySelector('#camera-control-wrapper');
+  const promptOverlay = element.querySelector('#prompt-overlay');
+
+  const initScene = () => {
+    if (typeof THREE === 'undefined') {
+      setTimeout(initScene, 100);
+      return;
+    }
+
+    const scene = new THREE.Scene();
+    scene.background = new THREE.Color(0x1a1a1a);
+
+    const camera = new THREE.PerspectiveCamera(50, wrapper.clientWidth / wrapper.clientHeight, 0.1, 1000);
+    camera.position.set(4, 3, 4);
+    camera.lookAt(0, 0.75, 0);
+
+    const renderer = new THREE.WebGLRenderer({ antialias: true });
+    renderer.setSize(wrapper.clientWidth, wrapper.clientHeight);
+    renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2));
+    wrapper.insertBefore(renderer.domElement, wrapper.firstChild);
+
+    scene.add(new THREE.AmbientLight(0xffffff, 0.6));
+    const dirLight = new THREE.DirectionalLight(0xffffff, 0.6);
+    dirLight.position.set(5, 10, 5);
+    scene.add(dirLight);
+
+    scene.add(new THREE.GridHelper(6, 12, 0x333333, 0x222222));
+
+    const CENTER = new THREE.Vector3(0, 0.75, 0);
+    const BASE_DISTANCE = 2.0;
+    const ROTATION_RADIUS = 2.2;
+    const TILT_RADIUS = 1.6;
+
+    let rotateDeg = props.value?.rotate_deg || 0;
+    let moveForward = props.value?.move_forward || 0;
+    let verticalTilt = props.value?.vertical_tilt || 0;
+    let wideangle = props.value?.wideangle || false;
+
+    const rotateSteps = [-90, -45, 0, 45, 90];
+    const forwardSteps = [0, 5, 10];
+    const tiltSteps = [-1, 0, 1];
+
+    function snapToNearest(value, steps) {
+      return steps.reduce((prev, curr) => Math.abs(curr - value) < Math.abs(prev - value) ? curr : prev);
+    }
+
+    function createPlaceholderTexture() {
+      const canvas = document.createElement('canvas');
+      canvas.width = 256;
+      canvas.height = 256;
+      const ctx = canvas.getContext('2d');
+      ctx.fillStyle = '#3a3a4a';
+      ctx.fillRect(0, 0, 256, 256);
+      ctx.fillStyle = '#ffcc99';
+      ctx.beginPath();
+      ctx.arc(128, 128, 80, 0, Math.PI * 2);
+      ctx.fill();
+      ctx.fillStyle = '#333';
+      ctx.beginPath();
+      ctx.arc(100, 110, 10, 0, Math.PI * 2);
+      ctx.arc(156, 110, 10, 0, Math.PI * 2);
+      ctx.fill();
+      ctx.strokeStyle = '#333';
+      ctx.lineWidth = 3;
+      ctx.beginPath();
+      ctx.arc(128, 130, 35, 0.2, Math.PI - 0.2);
+      ctx.stroke();
+      return new THREE.CanvasTexture(canvas);
+    }
+
+    let currentTexture = createPlaceholderTexture();
+    const planeMaterial = new THREE.MeshBasicMaterial({ map: currentTexture, side: THREE.DoubleSide });
+    let targetPlane = new THREE.Mesh(new THREE.PlaneGeometry(1.2, 1.2), planeMaterial);
+    targetPlane.position.copy(CENTER);
+    scene.add(targetPlane);
+
+    function updateTextureFromUrl(url) {
+      if (!url) {
+        planeMaterial.map = createPlaceholderTexture();
+        planeMaterial.needsUpdate = true;
+        scene.remove(targetPlane);
+        targetPlane = new THREE.Mesh(new THREE.PlaneGeometry(1.2, 1.2), planeMaterial);
+        targetPlane.position.copy(CENTER);
+        scene.add(targetPlane);
+        return;
+      }
+
+      const loader = new THREE.TextureLoader();
+      loader.crossOrigin = 'anonymous';
+      loader.load(url, (texture) => {
+        texture.minFilter = THREE.LinearFilter;
+        texture.magFilter = THREE.LinearFilter;
+        planeMaterial.map = texture;
+        planeMaterial.needsUpdate = true;
+
+        const img = texture.image;
+        if (img && img.width && img.height) {
+          const aspect = img.width / img.height;
+          const maxSize = 1.4;
+          let planeWidth, planeHeight;
+          if (aspect > 1) {
+            planeWidth = maxSize;
+            planeHeight = maxSize / aspect;
+          } else {
+            planeHeight = maxSize;
+            planeWidth = maxSize * aspect;
+          }
+          scene.remove(targetPlane);
+          targetPlane = new THREE.Mesh(new THREE.PlaneGeometry(planeWidth, planeHeight), planeMaterial);
+          targetPlane.position.copy(CENTER);
+          scene.add(targetPlane);
+        }
+      });
+    }
+
+    if (props.imageUrl) {
+      updateTextureFromUrl(props.imageUrl);
+    }
+
+    const cameraGroup = new THREE.Group();
+    const bodyMat = new THREE.MeshStandardMaterial({ color: 0x6699cc, metalness: 0.5, roughness: 0.3 });
+    const body = new THREE.Mesh(new THREE.BoxGeometry(0.28, 0.2, 0.35), bodyMat);
+    cameraGroup.add(body);
+    const lens = new THREE.Mesh(
+      new THREE.CylinderGeometry(0.08, 0.1, 0.16, 16),
+      new THREE.MeshStandardMaterial({ color: 0x6699cc, metalness: 0.5, roughness: 0.3 })
+    );
+    lens.rotation.x = Math.PI / 2;
+    lens.position.z = 0.24;
+    cameraGroup.add(lens);
+    scene.add(cameraGroup);
+
+    const rotationArcPoints = [];
+    for (let i = 0; i <= 32; i++) {
+      const angle = THREE.MathUtils.degToRad(-90 + (180 * i / 32));
+      rotationArcPoints.push(new THREE.Vector3(ROTATION_RADIUS * Math.sin(angle), 0.05, ROTATION_RADIUS * Math.cos(angle)));
+    }
+    const rotationCurve = new THREE.CatmullRomCurve3(rotationArcPoints);
+    const rotationArc = new THREE.Mesh(
+      new THREE.TubeGeometry(rotationCurve, 32, 0.035, 8, false),
+      new THREE.MeshStandardMaterial({ color: 0x00ff88, emissive: 0x00ff88, emissiveIntensity: 0.3 })
+    );
+    scene.add(rotationArc);
+
+    const rotationHandle = new THREE.Mesh(
+      new THREE.SphereGeometry(0.16, 16, 16),
+      new THREE.MeshStandardMaterial({ color: 0x00ff88, emissive: 0x00ff88, emissiveIntensity: 0.5 })
+    );
+    rotationHandle.userData.type = 'rotation';
+    scene.add(rotationHandle);
+
+    const tiltArcPoints = [];
+    for (let i = 0; i <= 32; i++) {
+      const angle = THREE.MathUtils.degToRad(-45 + (90 * i / 32));
+      tiltArcPoints.push(new THREE.Vector3(-0.7, TILT_RADIUS * Math.sin(angle) + CENTER.y, TILT_RADIUS * Math.cos(angle)));
+    }
+    const tiltCurve = new THREE.CatmullRomCurve3(tiltArcPoints);
+    const tiltArc = new THREE.Mesh(
+      new THREE.TubeGeometry(tiltCurve, 32, 0.035, 8, false),
+      new THREE.MeshStandardMaterial({ color: 0xff69b4, emissive: 0xff69b4, emissiveIntensity: 0.3 })
+    );
+    scene.add(tiltArc);
+
+    const tiltHandle = new THREE.Mesh(
+      new THREE.SphereGeometry(0.16, 16, 16),
+      new THREE.MeshStandardMaterial({ color: 0xff69b4, emissive: 0xff69b4, emissiveIntensity: 0.5 })
+    );
+    tiltHandle.userData.type = 'tilt';
+    scene.add(tiltHandle);
+
+    const distanceLineGeo = new THREE.BufferGeometry();
+    const distanceLine = new THREE.Line(distanceLineGeo, new THREE.LineBasicMaterial({ color: 0xffa500 }));
+    scene.add(distanceLine);
+
+    const distanceHandle = new THREE.Mesh(
+      new THREE.SphereGeometry(0.16, 16, 16),
+      new THREE.MeshStandardMaterial({ color: 0xffa500, emissive: 0xffa500, emissiveIntensity: 0.5 })
+    );
+    distanceHandle.userData.type = 'distance';
+    scene.add(distanceHandle);
+
+    function buildPromptText(rot, fwd, tilt, wide) {
+      const parts = [];
+      if (rot !== 0) {
+        const dir = rot > 0 ? 'left' : 'right';
+        parts.push('Rotate ' + Math.abs(rot) + '° ' + dir);
+      }
+      if (fwd > 5) parts.push('Close-up');
+      else if (fwd >= 1) parts.push('Move forward');
+      if (tilt <= -1) parts.push("Bird's-eye");
+      else if (tilt >= 1) parts.push("Worm's-eye");
+      if (wide) parts.push('Wide-angle');
+      return parts.length > 0 ? parts.join(' • ') : 'No camera movement';
+    }
+
+    function updatePositions() {
+      const rotRad = THREE.MathUtils.degToRad(-rotateDeg);
+      const distance = BASE_DISTANCE - (moveForward / 10) * 1.0;
+      // Invert: worm's-eye (1) = camera DOWN, bird's-eye (-1) = camera UP
+      const tiltAngle = -verticalTilt * 35;
+      const tiltRad = THREE.MathUtils.degToRad(tiltAngle);
+
+      const camX = distance * Math.sin(rotRad) * Math.cos(tiltRad);
+      const camY = distance * Math.sin(tiltRad) + CENTER.y;
+      const camZ = distance * Math.cos(rotRad) * Math.cos(tiltRad);
+
+      cameraGroup.position.set(camX, camY, camZ);
+      cameraGroup.lookAt(CENTER);
+
+      rotationHandle.position.set(ROTATION_RADIUS * Math.sin(rotRad), 0.05, ROTATION_RADIUS * Math.cos(rotRad));
+
+      const tiltHandleAngle = THREE.MathUtils.degToRad(tiltAngle);
+      tiltHandle.position.set(-0.7, TILT_RADIUS * Math.sin(tiltHandleAngle) + CENTER.y, TILT_RADIUS * Math.cos(tiltHandleAngle));
+
+      const handleDist = distance - 0.4;
+      distanceHandle.position.set(
+        handleDist * Math.sin(rotRad) * Math.cos(tiltRad),
+        handleDist * Math.sin(tiltRad) + CENTER.y,
+        handleDist * Math.cos(rotRad) * Math.cos(tiltRad)
+      );
+      distanceLineGeo.setFromPoints([cameraGroup.position.clone(), CENTER.clone()]);
+
+      promptOverlay.textContent = buildPromptText(rotateDeg, moveForward, verticalTilt, wideangle);
+    }
+
+    function updatePropsAndTrigger() {
+      const rotSnap = snapToNearest(rotateDeg, rotateSteps);
+      const fwdSnap = snapToNearest(moveForward, forwardSteps);
+      const tiltSnap = snapToNearest(verticalTilt, tiltSteps);
+
+      props.value = { rotate_deg: rotSnap, move_forward: fwdSnap, vertical_tilt: tiltSnap, wideangle: wideangle };
+      trigger('change', props.value);
+    }
+
+    const raycaster = new THREE.Raycaster();
+    const mouse = new THREE.Vector2();
+    let isDragging = false;
+    let dragTarget = null;
+    let dragStartMouse = new THREE.Vector2();
+    let dragStartForward = 0;
+    const intersection = new THREE.Vector3();
+
+    const canvas = renderer.domElement;
+
+    canvas.addEventListener('mousedown', (e) => {
+      const rect = canvas.getBoundingClientRect();
+      mouse.x = ((e.clientX - rect.left) / rect.width) * 2 - 1;
+      mouse.y = -((e.clientY - rect.top) / rect.height) * 2 + 1;
+
+      raycaster.setFromCamera(mouse, camera);
+      const intersects = raycaster.intersectObjects([rotationHandle, tiltHandle, distanceHandle]);
+
+      if (intersects.length > 0) {
+        isDragging = true;
+        dragTarget = intersects[0].object;
+        dragTarget.material.emissiveIntensity = 1.0;
+        dragTarget.scale.setScalar(1.3);
+        dragStartMouse.copy(mouse);
+        dragStartForward = moveForward;
+        canvas.style.cursor = 'grabbing';
+      }
+    });
+
+    canvas.addEventListener('mousemove', (e) => {
+      const rect = canvas.getBoundingClientRect();
+      mouse.x = ((e.clientX - rect.left) / rect.width) * 2 - 1;
+      mouse.y = -((e.clientY - rect.top) / rect.height) * 2 + 1;
+
+      if (isDragging && dragTarget) {
+        raycaster.setFromCamera(mouse, camera);
+
+        if (dragTarget.userData.type === 'rotation') {
+          const plane = new THREE.Plane(new THREE.Vector3(0, 1, 0), -0.05);
+          if (raycaster.ray.intersectPlane(plane, intersection)) {
+            let angle = THREE.MathUtils.radToDeg(Math.atan2(intersection.x, intersection.z));
+            rotateDeg = THREE.MathUtils.clamp(-angle, -90, 90);
+          }
+        } else if (dragTarget.userData.type === 'tilt') {
+          const plane = new THREE.Plane(new THREE.Vector3(1, 0, 0), 0.7);
+          if (raycaster.ray.intersectPlane(plane, intersection)) {
+            const relY = intersection.y - CENTER.y;
+            const relZ = intersection.z;
+            const angle = THREE.MathUtils.radToDeg(Math.atan2(relY, relZ));
+            // Invert: drag DOWN = worm's-eye (1), drag UP = bird's-eye (-1)
+            verticalTilt = THREE.MathUtils.clamp(-angle / 35, -1, 1);
+          }
+        } else if (dragTarget.userData.type === 'distance') {
+          const deltaY = mouse.y - dragStartMouse.y;
+          moveForward = THREE.MathUtils.clamp(dragStartForward + deltaY * 12, 0, 10);
+        }
+        updatePositions();
+      } else {
+        raycaster.setFromCamera(mouse, camera);
+        const intersects = raycaster.intersectObjects([rotationHandle, tiltHandle, distanceHandle]);
+        [rotationHandle, tiltHandle, distanceHandle].forEach(h => {
+          h.material.emissiveIntensity = 0.5;
+          h.scale.setScalar(1);
+        });
+        if (intersects.length > 0) {
+          intersects[0].object.material.emissiveIntensity = 0.8;
+          intersects[0].object.scale.setScalar(1.1);
+          canvas.style.cursor = 'grab';
+        } else {
+          canvas.style.cursor = 'default';
+        }
+      }
+    });
+
+    const onMouseUp = () => {
+      if (dragTarget) {
+        dragTarget.material.emissiveIntensity = 0.5;
+        dragTarget.scale.setScalar(1);
+
+        const targetRot = snapToNearest(rotateDeg, rotateSteps);
+        const targetFwd = snapToNearest(moveForward, forwardSteps);
+        const targetTilt = snapToNearest(verticalTilt, tiltSteps);
+
+        const startRot = rotateDeg, startFwd = moveForward, startTilt = verticalTilt;
+        const startTime = Date.now();
+
+        function animateSnap() {
+          const t = Math.min((Date.now() - startTime) / 200, 1);
+          const ease = 1 - Math.pow(1 - t, 3);
+
+          rotateDeg = startRot + (targetRot - startRot) * ease;
+          moveForward = startFwd + (targetFwd - startFwd) * ease;
+          verticalTilt = startTilt + (targetTilt - startTilt) * ease;
+
+          updatePositions();
+          if (t < 1) requestAnimationFrame(animateSnap);
+          else updatePropsAndTrigger();
+        }
+        animateSnap();
+      }
+      isDragging = false;
+      dragTarget = null;
+      canvas.style.cursor = 'default';
+    };
+
+    canvas.addEventListener('mouseup', onMouseUp);
+    canvas.addEventListener('mouseleave', onMouseUp);
+
+    canvas.addEventListener('touchstart', (e) => {
+      e.preventDefault();
+      const touch = e.touches[0];
+      const rect = canvas.getBoundingClientRect();
+      mouse.x = ((touch.clientX - rect.left) / rect.width) * 2 - 1;
+      mouse.y = -((touch.clientY - rect.top) / rect.height) * 2 + 1;
+
+      raycaster.setFromCamera(mouse, camera);
+      const intersects = raycaster.intersectObjects([rotationHandle, tiltHandle, distanceHandle]);
+
+      if (intersects.length > 0) {
+        isDragging = true;
+        dragTarget = intersects[0].object;
+        dragTarget.material.emissiveIntensity = 1.0;
+        dragTarget.scale.setScalar(1.3);
+        dragStartMouse.copy(mouse);
+        dragStartForward = moveForward;
+      }
+    }, { passive: false });
+
+    canvas.addEventListener('touchmove', (e) => {
+      e.preventDefault();
+      const touch = e.touches[0];
+      const rect = canvas.getBoundingClientRect();
+      mouse.x = ((touch.clientX - rect.left) / rect.width) * 2 - 1;
+      mouse.y = -((touch.clientY - rect.top) / rect.height) * 2 + 1;
+
+      if (isDragging && dragTarget) {
+        raycaster.setFromCamera(mouse, camera);
+
+        if (dragTarget.userData.type === 'rotation') {
+          const plane = new THREE.Plane(new THREE.Vector3(0, 1, 0), -0.05);
+          if (raycaster.ray.intersectPlane(plane, intersection)) {
+            let angle = THREE.MathUtils.radToDeg(Math.atan2(intersection.x, intersection.z));
+            rotateDeg = THREE.MathUtils.clamp(-angle, -90, 90);
+          }
+        } else if (dragTarget.userData.type === 'tilt') {
+          const plane = new THREE.Plane(new THREE.Vector3(1, 0, 0), 0.7);
+          if (raycaster.ray.intersectPlane(plane, intersection)) {
+            const relY = intersection.y - CENTER.y;
+            const relZ = intersection.z;
+            const angle = THREE.MathUtils.radToDeg(Math.atan2(relY, relZ));
+            // Invert: drag DOWN = worm's-eye (1), drag UP = bird's-eye (-1)
+            verticalTilt = THREE.MathUtils.clamp(-angle / 35, -1, 1);
+          }
+        } else if (dragTarget.userData.type === 'distance') {
+          const deltaY = mouse.y - dragStartMouse.y;
+          moveForward = THREE.MathUtils.clamp(dragStartForward + deltaY * 12, 0, 10);
+        }
+        updatePositions();
+      }
+    }, { passive: false });
+
+    canvas.addEventListener('touchend', (e) => { e.preventDefault(); onMouseUp(); }, { passive: false });
+    canvas.addEventListener('touchcancel', (e) => { e.preventDefault(); onMouseUp(); }, { passive: false });
+
+    updatePositions();
+
+    function render() {
+      requestAnimationFrame(render);
+      renderer.render(scene, camera);
+    }
+    render();
+
+    new ResizeObserver(() => {
+      camera.aspect = wrapper.clientWidth / wrapper.clientHeight;
+      camera.updateProjectionMatrix();
+      renderer.setSize(wrapper.clientWidth, wrapper.clientHeight);
+    }).observe(wrapper);
+
+    wrapper._updateTexture = updateTextureFromUrl;
+
+    let lastImageUrl = props.imageUrl;
+    let lastValue = JSON.stringify(props.value);
+    setInterval(() => {
+      if (props.imageUrl !== lastImageUrl) {
+        lastImageUrl = props.imageUrl;
+        updateTextureFromUrl(props.imageUrl);
+      }
+      const currentValue = JSON.stringify(props.value);
+      if (currentValue !== lastValue) {
+        lastValue = currentValue;
+        if (props.value && typeof props.value === 'object') {
+          rotateDeg = props.value.rotate_deg ?? rotateDeg;
+          moveForward = props.value.move_forward ?? moveForward;
+          verticalTilt = props.value.vertical_tilt ?? verticalTilt;
+          wideangle = props.value.wideangle ?? wideangle;
+          updatePositions();
+        }
+      }
+    }, 100);
+  };
+
+  initScene();
+})();
+"""
 
 
-def reset_all() -> list:
-    """
-    Reset all camera control knobs and flags to their default values.
-
-    This is used by the "Reset" button to set:
-    - rotate_deg = 0
-    - move_forward = 0
-    - vertical_tilt = 0
-    - wideangle = False
-    - is_reset = True
-
-    Returns:
-        list:
-            A list of values matching the order of the reset outputs:
-            [rotate_deg, move_forward, vertical_tilt, wideangle, is_reset, True]
-    """
-    return [0, 0, 0, 0, False, True]
+def create_camera_3d_component(value=None, imageUrl=None, **kwargs):
+    """Create a 3D camera control component using gr.HTML."""
+    if value is None:
+        value = {"rotate_deg": 0, "move_forward": 0, "vertical_tilt": 0, "wideangle": False}
+
+    return gr.HTML(
+        value=value,
+        html_template=CAMERA_3D_HTML_TEMPLATE,
+        js_on_load=CAMERA_3D_JS,
+        imageUrl=imageUrl,
+        **kwargs
+    )
 
 
-def end_reset() -> bool:
-    """
-    Mark the end of a reset cycle.
-
-    This helper is chained after `reset_all` to set the internal
-    `is_reset` flag back to False, so that live inference can resume.
-
-    Returns:
-        bool:
-            Always returns False.
-    """
+# --- UI ---
+css = '''
+#col-container { max-width: 1100px; margin: 0 auto; }
+.dark .progress-text { color: white !important; }
+#camera-3d-control { min-height: 400px; }
+#examples {
+    margin-top: 20px;
+}
+'''
+
+
+def reset_all() -> list:
+    """Reset all camera control knobs and flags to their default values."""
+    return [0, 0, 0, False, True]
+
+
+def end_reset() -> bool:
+    """Mark the end of a reset cycle."""
     return False
 
 
-def update_dimensions_on_upload(
-    image: Optional[Image.Image]
-) -> Tuple[int, int]:
-    """
-    Compute recommended (width, height) for the output resolution when an
-    image is uploaded while preserveing the aspect ratio.
-
-    Args:
-        image (PIL.Image.Image | None):
-            The uploaded image. If `None`, defaults to (1024, 1024).
-
-    Returns:
-        Tuple[int, int]:
-            The new (width, height).
-    """
+def update_dimensions_on_upload(image: Optional[Image.Image]) -> Tuple[int, int]:
+    """Compute recommended (width, height) for the output resolution."""
     if image is None:
         return 1024, 1024
 
@@ -394,167 +698,160 @@ def update_dimensions_on_upload(
     aspect_ratio = original_width / original_height
     new_width = int(new_height * aspect_ratio)
 
-    # Ensure dimensions are multiples of 8
     new_width = (new_width // 8) * 8
     new_height = (new_height // 8) * 8
 
     return new_width, new_height
 
 
-with gr.Blocks() as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown("## 🎬 Qwen Image Edit — Camera Angle Control")
-        gr.Markdown("""
-        Qwen Image Edit 2509 for Camera Control ✨
-        Using [dx8152's Qwen-Edit-2509-Multiple-angles LoRA](https://huggingface.co/dx8152/Qwen-Edit-2509-Multiple-angles) and [Phr00t/Qwen-Image-Edit-Rapid-AIO](https://huggingface.co/Phr00t/Qwen-Image-Edit-Rapid-AIO/tree/main) for 4-step inference 💨
-        """
-        )
-
-        with gr.Row():
-            with gr.Column():
-                image = gr.Image(label="Input Image", type="pil")
-                prev_output = gr.Image(value=None, visible=False)
-                is_reset = gr.Checkbox(value=False, visible=False)
-
-                with gr.Tab("Camera Controls"):
-                    rotate_deg = gr.Slider(
-                        label="Rotate Right-Left (degrees °)",
-                        minimum=-90,
-                        maximum=90,
-                        step=45,
-                        value=0
-                    )
-                    move_forward = gr.Slider(
-                        label="Move Forward → Close-Up",
-                        minimum=0,
-                        maximum=10,
-                        step=5,
-                        value=0
-                    )
-                    vertical_tilt = gr.Slider(
-                        label="Vertical Angle (Bird ↔ Worm)",
-                        minimum=-1,
-                        maximum=1,
-                        step=1,
-                        value=0
-                    )
-                    wideangle = gr.Checkbox(label="Wide-Angle Lens", value=False)
-                with gr.Row():
-                    reset_btn = gr.Button("Reset")
-                    run_btn = gr.Button("Generate", variant="primary")
-
-                with gr.Accordion("Advanced Settings", open=False):
-                    seed = gr.Slider(
-                        label="Seed",
-                        minimum=0,
-                        maximum=MAX_SEED,
-                        step=1,
-                        value=0
-                    )
-                    randomize_seed = gr.Checkbox(
-                        label="Randomize Seed",
-                        value=True
-                    )
-                    true_guidance_scale = gr.Slider(
-                        label="True Guidance Scale",
-                        minimum=1.0,
-                        maximum=10.0,
-                        step=0.1,
-                        value=1.0
-                    )
-                    num_inference_steps = gr.Slider(
-                        label="Inference Steps",
-                        minimum=1,
-                        maximum=40,
-                        step=1,
-                        value=4
-                    )
-                    height = gr.Slider(
-                        label="Height",
-                        minimum=256,
-                        maximum=2048,
-                        step=8,
-                        value=1024
-                    )
-                    width = gr.Slider(
-                        label="Width",
-                        minimum=256,
-                        maximum=2048,
-                        step=8,
-                        value=1024
-                    )
-
-            with gr.Column():
-                result = gr.Image(label="Output Image", interactive=False)
-                prompt_preview = gr.Textbox(label="Processed Prompt", interactive=False)
-                create_video_button = gr.Button(
-                    "🎥 Create Video Between Images",
-                    variant="secondary",
-                    visible=False
+with gr.Blocks(css=css, theme=gr.themes.Citrus()) as demo:
+    gr.Markdown("""
+    ## 🎬 Qwen Image Edit — Camera Angle Control
+
+    Qwen Image Edit 2509 for Camera Control ✨
+    Using [dx8152's Qwen-Edit-2509-Multiple-angles LoRA](https://huggingface.co/dx8152/Qwen-Edit-2509-Multiple-angles) and [Phr00t/Qwen-Image-Edit-Rapid-AIO](https://huggingface.co/Phr00t/Qwen-Image-Edit-Rapid-AIO/tree/main) for 4-step inference 💨
+    """)
+
+    with gr.Row():
+        with gr.Column(scale=1):
+
+            image = gr.Image(label="Input Image", type="pil", height=280)
+            prev_output = gr.Image(value=None, visible=False)
+            is_reset = gr.Checkbox(value=False, visible=False)
+
+            with gr.Tab("🎮 3D Camera Control"):
+                gr.Markdown("*Drag the handles: 🟢 Rotation, 🩷 Tilt, 🟠 Distance*")
+
+                camera_3d = create_camera_3d_component(
+                    value={"rotate_deg": 0, "move_forward": 0, "vertical_tilt": 0, "wideangle": False},
+                    elem_id="camera-3d-control"
                 )
-                with gr.Group(visible=False) as video_group:
-                    video_output = gr.Video(
-                        label="Generated Video",
-                        buttons=["download"],
-                        autoplay=True
-                    )
-
-    inputs = [
-        image, rotate_deg, move_forward,
-        vertical_tilt, wideangle,
-        seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output
-    ]
+            with gr.Tab("🎚️ Slider Controls"):
+                rotate_deg = gr.Slider(label="Rotate Right ↔ Left (°)", minimum=-90, maximum=90, step=45, value=0)
+                move_forward = gr.Slider(label="Move Forward → Close-Up", minimum=0, maximum=10, step=5, value=0)
+                vertical_tilt = gr.Slider(label="Vertical: Bird's-eye ↔ Worm's-eye", minimum=-1, maximum=1, step=1, value=0)
+                wideangle = gr.Checkbox(label="🔭 Wide-Angle Lens", value=False)
+
+            with gr.Row():
+                reset_btn = gr.Button("🔄 Reset")
+                run_btn = gr.Button("🚀 Generate", variant="primary")
+
+        with gr.Column(scale=1):
+            result = gr.Image(label="Output Image", interactive=False, height=350)
+            prompt_preview = gr.Textbox(label="Generated Prompt", interactive=False)
+
+            create_video_button = gr.Button(
+                "🎥 Create Video Between Images",
+                variant="secondary",
+                visible=False
+            )
+            with gr.Group(visible=False) as video_group:
+                video_output = gr.Video(label="Generated Video", buttons=["download"], autoplay=True)
+
+
+    with gr.Accordion("⚙️ Advanced Settings", open=False):
+        seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+        randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
+        true_guidance_scale = gr.Slider(label="True Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
+        num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=40, step=1, value=4)
+        height = gr.Slider(label="Height", minimum=256, maximum=2048, step=8, value=1024)
+        width = gr.Slider(label="Width", minimum=256, maximum=2048, step=8, value=1024)
+
+    # --- Helper Functions ---
+    def update_prompt_from_sliders(rotate, forward, tilt, wide):
+        return build_camera_prompt(rotate, forward, tilt, wide)
+
+    def sync_3d_to_sliders(camera_value):
+        if camera_value and isinstance(camera_value, dict):
+            rot = camera_value.get('rotate_deg', 0)
+            fwd = camera_value.get('move_forward', 0)
+            tilt = camera_value.get('vertical_tilt', 0)
+            wide = camera_value.get('wideangle', False)
+            prompt = build_camera_prompt(rot, fwd, tilt, wide)
+            return rot, fwd, tilt, wide, prompt
+        return gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
+
+    def sync_sliders_to_3d(rotate, forward, tilt, wide):
+        return {"rotate_deg": rotate, "move_forward": forward, "vertical_tilt": tilt, "wideangle": wide}
+
+    def update_3d_image(img):
+        if img is None:
+            return gr.update(imageUrl=None)
+        buffered = BytesIO()
+        img.save(buffered, format="PNG")
+        img_str = base64.b64encode(buffered.getvalue()).decode()
+        data_url = f"data:image/png;base64,{img_str}"
+        return gr.update(imageUrl=data_url)
+
+    # Define inputs/outputs
+    inputs = [image, rotate_deg, move_forward, vertical_tilt, wideangle, seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output]
     outputs = [result, seed, prompt_preview]
-
-    # Reset behavior
-    reset_btn.click(
-        fn=reset_all,
-        inputs=None,
-        outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, is_reset],
-        queue=False
-    ).then(fn=end_reset, inputs=None, outputs=[is_reset], queue=False)
-
-    # Manual generation with video button visibility control
-    def infer_and_show_video_button(*args: Any):
-        """
-        Wrapper around `infer_camera_edit` that also controls the visibility
-        of the 'Create Video Between Images' button.
-
-        The first argument in `args` is expected to be the input image; if both
-        input and output images are present, the video button is shown.
-
-        Args:
-            *args:
-                Positional arguments forwarded directly to `infer_camera_edit`.
-
-        Returns:
-            tuple:
-                (output_image, seed, prompt, video_button_visibility_update)
-        """
+    control_inputs = [image, rotate_deg, move_forward, vertical_tilt, wideangle, seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output]
+    control_inputs_with_flag = [is_reset] + control_inputs
+
+    def maybe_infer(is_reset_val: bool, progress: gr.Progress = gr.Progress(track_tqdm=True), *args: Any):
+        if is_reset_val:
+            return gr.update(), gr.update(), gr.update(), gr.update()
         result_img, result_seed, result_prompt = infer_camera_edit(*args)
-        # Show video button if we have both input and output images
         show_button = args[0] is not None and result_img is not None
         return result_img, result_seed, result_prompt, gr.update(visible=show_button)
-
-    run_event = run_btn.click(
-        fn=infer_and_show_video_button,
-        inputs=inputs,
+
+    # --- Event Handlers ---
+
+    # Slider -> Prompt preview
+    for slider in [rotate_deg, move_forward, vertical_tilt]:
+        slider.change(fn=update_prompt_from_sliders, inputs=[rotate_deg, move_forward, vertical_tilt, wideangle], outputs=[prompt_preview])
+    wideangle.change(fn=update_prompt_from_sliders, inputs=[rotate_deg, move_forward, vertical_tilt, wideangle], outputs=[prompt_preview])
+
+    # 3D control -> Sliders + Prompt + Inference
+    camera_3d.change(
+        fn=sync_3d_to_sliders,
+        inputs=[camera_3d],
+        outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, prompt_preview]
+    ).then(
+        fn=maybe_infer,
+        inputs=control_inputs_with_flag,
         outputs=outputs + [create_video_button]
     )
-
+
+    # Sliders -> 3D control
+    for slider in [rotate_deg, move_forward, vertical_tilt]:
+        slider.release(fn=sync_sliders_to_3d, inputs=[rotate_deg, move_forward, vertical_tilt, wideangle], outputs=[camera_3d])
+    wideangle.input(fn=sync_sliders_to_3d, inputs=[rotate_deg, move_forward, vertical_tilt, wideangle], outputs=[camera_3d])
+
+    # Reset
+    reset_btn.click(fn=reset_all, inputs=None, outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, is_reset], queue=False
+    ).then(fn=end_reset, inputs=None, outputs=[is_reset], queue=False
+    ).then(fn=sync_sliders_to_3d, inputs=[rotate_deg, move_forward, vertical_tilt, wideangle], outputs=[camera_3d])
+
+    # Generate button
+    def infer_and_show_video_button(*args: Any):
+        result_img, result_seed, result_prompt = infer_camera_edit(*args)
+        show_button = args[0] is not None and result_img is not None
+        return result_img, result_seed, result_prompt, gr.update(visible=show_button)
+
+    run_event = run_btn.click(fn=infer_and_show_video_button, inputs=inputs, outputs=outputs + [create_video_button])
+
     # Video creation
-    create_video_button.click(
-        fn=lambda: gr.update(visible=True),
-        outputs=[video_group],
-        api_visibility="private"
-    ).then(
-        fn=create_video_between_images,
-        inputs=[image, result, prompt_preview],
-        outputs=[video_output],
-        api_visibility="private"
-    )
+    create_video_button.click(fn=lambda: gr.update(visible=True), outputs=[video_group], api_visibility="private"
+    ).then(fn=create_video_between_images, inputs=[image, result, prompt_preview], outputs=[video_output], api_visibility="private")
+
+    # Image upload
+    image.upload(fn=update_dimensions_on_upload, inputs=[image], outputs=[width, height]
+    ).then(fn=reset_all, inputs=None, outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, is_reset], queue=False
+    ).then(fn=end_reset, inputs=None, outputs=[is_reset], queue=False
+    ).then(fn=update_3d_image, inputs=[image], outputs=[camera_3d])
+
+    image.clear(fn=lambda: gr.update(imageUrl=None), outputs=[camera_3d])
+
+    # Live updates on slider release
+    for control in [rotate_deg, move_forward, vertical_tilt]:
+        control.release(fn=maybe_infer, inputs=control_inputs_with_flag, outputs=outputs + [create_video_button])
+    wideangle.input(fn=maybe_infer, inputs=control_inputs_with_flag, outputs=outputs + [create_video_button])
+
+    run_event.then(lambda img, *_: img, inputs=[result], outputs=[prev_output])
+
 
-    # Examples
     gr.Examples(
         examples=[
             ["tool_of_the_sea.png", 90, 0, 0, False, 0, True, 1.0, 4, 568, 1024],
@@ -563,72 +860,31 @@ with gr.Blocks() as demo:
             ["disaster_girl.jpg", -45, 0, 1, False, 0, True, 1.0, 4, 768, 1024],
            ["grumpy.png", 90, 0, 1, False, 0, True, 1.0, 4, 576, 1024]
         ],
-        inputs=[
-            image, rotate_deg, move_forward,
-            vertical_tilt, wideangle,
-            seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width
-        ],
+        inputs=[image, rotate_deg, move_forward, vertical_tilt, wideangle, seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width],
         outputs=outputs,
         fn=infer_camera_edit,
         cache_examples=True,
         cache_mode="lazy",
         elem_id="examples"
     )
-
-    # Image upload triggers dimension update and control reset
-    image.upload(
-        fn=update_dimensions_on_upload,
-        inputs=[image],
-        outputs=[width, height]
-    ).then(
-        fn=reset_all,
-        inputs=None,
-        outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, is_reset],
-        queue=False
-    ).then(
-        fn=end_reset,
-        inputs=None,
-        outputs=[is_reset],
-        queue=False
-    )
-
-    # Live updates
-    def maybe_infer(
-        is_reset: bool,
-        progress: gr.Progress = gr.Progress(track_tqdm=True),
-        *args: Any
-    ):
-        if is_reset:
-            return gr.update(), gr.update(), gr.update(), gr.update()
-        else:
-            result_img, result_seed, result_prompt = infer_camera_edit(*args)
-            # Show video button if we have both input and output
-            show_button = args[0] is not None and result_img is not None
-            return result_img, result_seed, result_prompt, gr.update(visible=show_button)
-
-    control_inputs = [
-        image, rotate_deg, move_forward,
-        vertical_tilt, wideangle,
-        seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output
-    ]
-    control_inputs_with_flag = [is_reset] + control_inputs
-
-    for control in [rotate_deg, move_forward, vertical_tilt]:
-        control.release(
-            fn=maybe_infer,
-            inputs=control_inputs_with_flag,
-            outputs=outputs + [create_video_button]
-        )
-
-    wideangle.input(
-        fn=maybe_infer,
-        inputs=control_inputs_with_flag,
-        outputs=outputs + [create_video_button]
-    )
-
-    run_event.then(lambda img, *_: img, inputs=[result], outputs=[prev_output])
-
+
+    # Sync 3D component when sliders change (covers example loading)
+    def sync_3d_on_slider_change(img, rot, fwd, tilt, wide):
+        camera_value = {"rotate_deg": rot, "move_forward": fwd, "vertical_tilt": tilt, "wideangle": wide}
+        if img is not None:
+            buffered = BytesIO()
+            img.save(buffered, format="PNG")
+            img_str = base64.b64encode(buffered.getvalue()).decode()
+            data_url = f"data:image/png;base64,{img_str}"
+            return gr.update(value=camera_value, imageUrl=data_url)
+        return gr.update(value=camera_value)
+
+    # When any slider value changes (including from examples), sync the 3D component
+    for slider in [rotate_deg, move_forward, vertical_tilt]:
+        slider.change(fn=sync_3d_on_slider_change, inputs=[image, rotate_deg, move_forward, vertical_tilt, wideangle], outputs=[camera_3d])
+
     gr.api(infer_camera_edit, api_name="infer_edit_camera_angles")
     gr.api(create_video_between_images, api_name="create_video_between_images")
 
-demo.launch(mcp_server=True, theme=gr.themes.Citrus(), css=css, footer_links=["api", "gradio", "settings"])
+head = '<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>'
+demo.launch(mcp_server=True, head=head, footer_links=["api", "gradio", "settings"])
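
Reviewer note: since both gr.api registrations survive the refactor, the camera edit stays scriptable outside the UI. A minimal client-side sketch (illustrative only; the Space id is a placeholder, and the call assumes gradio_client's positional convention with omitted arguments falling back to the defaults above):

    from gradio_client import Client, handle_file

    client = Client("<owner>/<this-space>")  # placeholder — use the actual Space id
    edited_image, used_seed, prompt = client.predict(
        handle_file("input.png"),  # image
        45,                        # rotate_deg
        0,                         # move_forward
        0,                         # vertical_tilt
        False,                     # wideangle
        api_name="/infer_edit_camera_angles",
    )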
 
 