zxc4wewewe committed on
Commit
61fa8fe
·
verified ·
1 Parent(s): fff473d

Upload 11 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ 0fe3667f2e073b314158f58ad66c58dd9f31cea12496589d4acf576f923adf45 filter=lfs diff=lfs merge=lfs -text
37
+ 1344c764ac72d26bee7f8e76020ba81ba05df251a0122beea57a65ce85f6d05f filter=lfs diff=lfs merge=lfs -text
38
+ 21aa297dafc67ac89bff93255a026eba67b63023e921a8bf918e7b0e81c09eae filter=lfs diff=lfs merge=lfs -text
39
+ 27193cae97c24a31ab574a21fc3c598627c28ab60edb0c60209acdb8071cf1ea filter=lfs diff=lfs merge=lfs -text
40
+ e49ad4d8e649dc6e8f38356dc7b3ea1de5a3c112b58a61ed321f6c107810a93d filter=lfs diff=lfs merge=lfs -text
0fe3667f2e073b314158f58ad66c58dd9f31cea12496589d4acf576f923adf45 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fe3667f2e073b314158f58ad66c58dd9f31cea12496589d4acf576f923adf45
3
+ size 947895
1344c764ac72d26bee7f8e76020ba81ba05df251a0122beea57a65ce85f6d05f ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1344c764ac72d26bee7f8e76020ba81ba05df251a0122beea57a65ce85f6d05f
3
+ size 715217
21aa297dafc67ac89bff93255a026eba67b63023e921a8bf918e7b0e81c09eae ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:21aa297dafc67ac89bff93255a026eba67b63023e921a8bf918e7b0e81c09eae
3
+ size 372157
27193cae97c24a31ab574a21fc3c598627c28ab60edb0c60209acdb8071cf1ea ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27193cae97c24a31ab574a21fc3c598627c28ab60edb0c60209acdb8071cf1ea
3
+ size 1360277
3cf76188e4938c72e4c1fb9f9ab8baafffcd5a32 ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ """
3
+
4
+ from typing import Any
5
+ from typing import Callable
6
+ from typing import ParamSpec
7
+ from torchao.quantization import quantize_
8
+ from torchao.quantization import Float8DynamicActivationFloat8WeightConfig
9
+ import spaces
10
+ import torch
11
+ from torch.utils._pytree import tree_map
12
+
13
+
14
+ P = ParamSpec('P')
15
+
16
+
17
+ TRANSFORMER_IMAGE_SEQ_LENGTH_DIM = torch.export.Dim('image_seq_length')
18
+ TRANSFORMER_TEXT_SEQ_LENGTH_DIM = torch.export.Dim('text_seq_length')
19
+
20
+ TRANSFORMER_DYNAMIC_SHAPES = {
21
+ 'hidden_states': {
22
+ 1: TRANSFORMER_IMAGE_SEQ_LENGTH_DIM,
23
+ },
24
+ 'encoder_hidden_states': {
25
+ 1: TRANSFORMER_TEXT_SEQ_LENGTH_DIM,
26
+ },
27
+ 'encoder_hidden_states_mask': {
28
+ 1: TRANSFORMER_TEXT_SEQ_LENGTH_DIM,
29
+ },
30
+ 'image_rotary_emb': ({
31
+ 0: TRANSFORMER_IMAGE_SEQ_LENGTH_DIM,
32
+ }, {
33
+ 0: TRANSFORMER_TEXT_SEQ_LENGTH_DIM,
34
+ }),
35
+ }
36
+
37
+
38
+ INDUCTOR_CONFIGS = {
39
+ 'conv_1x1_as_mm': True,
40
+ 'epilogue_fusion': False,
41
+ 'coordinate_descent_tuning': True,
42
+ 'coordinate_descent_check_all_directions': True,
43
+ 'max_autotune': True,
44
+ 'triton.cudagraphs': True,
45
+ }
46
+
47
+
48
def optimize_pipeline_(pipeline: Callable[P, Any], *args: P.args, **kwargs: P.kwargs):
    """Ahead-of-time compile the pipeline's transformer and patch it in place.

    Runs one real pipeline call under ``spaces.aoti_capture`` to record the
    exact args/kwargs the transformer receives, exports the transformer with
    ``torch.export`` on those captured inputs (marking the image/text
    sequence-length dims dynamic via ``TRANSFORMER_DYNAMIC_SHAPES``), compiles
    the exported program with ``spaces.aoti_compile`` using
    ``INDUCTOR_CONFIGS``, and applies the result back onto
    ``pipeline.transformer``.

    Args:
        pipeline: Diffusers-style pipeline exposing a ``.transformer`` module.
        *args, **kwargs: A representative set of pipeline call arguments; the
            pipeline is actually invoked once with them (expensive).
    """

    # One-off heavyweight compile step: allow up to 1500 s on the ZeroGPU worker.
    @spaces.GPU(duration=1500)
    def compile_transformer():

        # Run the pipeline once while intercepting the transformer call so we
        # obtain real example args/kwargs for torch.export.
        with spaces.aoti_capture(pipeline.transformer) as call:
            pipeline(*args, **kwargs)

        # Default every captured kwarg to a static shape (None), then override
        # the entries whose sequence-length dims must remain dynamic.
        dynamic_shapes = tree_map(lambda t: None, call.kwargs)
        dynamic_shapes |= TRANSFORMER_DYNAMIC_SHAPES

        # FP8 dynamic-activation quantization is currently disabled; keep the
        # line for easy re-enabling, but do so deliberately.
        # quantize_(pipeline.transformer, Float8DynamicActivationFloat8WeightConfig())

        exported = torch.export.export(
            mod=pipeline.transformer,
            args=call.args,
            kwargs=call.kwargs,
            dynamic_shapes=dynamic_shapes,
        )

        return spaces.aoti_compile(exported, INDUCTOR_CONFIGS)

    # Swap the eager transformer for the compiled artifact.
    spaces.aoti_apply(compile_transformer(), pipeline.transformer)
5d20bcde33825d82a8b53826b68c1df9a3d92eb7 ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ disaster_girl.jpg filter=lfs diff=lfs merge=lfs -text
37
+ grumpy.png filter=lfs diff=lfs merge=lfs -text
38
+ wednesday.png filter=lfs diff=lfs merge=lfs -text
39
+ monkey.jpg filter=lfs diff=lfs merge=lfs -text
40
+ tool_of_the_sea.png filter=lfs diff=lfs merge=lfs -text
716a990ffed0befd17c0073b25c154b990ad936d ADDED
Binary file (56.9 kB). View file
 
bcc151c0e8efe3aeb2b46170f5961675ee65e658 ADDED
@@ -0,0 +1,869 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import random
4
+ import torch
5
+ import spaces
6
+ import base64
7
+ from io import BytesIO
8
+
9
+ from PIL import Image
10
+ from diffusers import FlowMatchEulerDiscreteScheduler
11
+ from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
12
+ from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
13
+ #from diffusers import QwenImageEditPlusPipeline, QwenImageTransformer2DModel
14
+
15
+ import os
16
+ from gradio_client import Client, handle_file
17
+ import tempfile
18
+ from typing import Optional, Tuple, Any
19
+
20
+
21
# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Base Qwen-Image-Edit-2509 pipeline with a community "Rapid-AIO" transformer
# swapped in — presumably a distilled/merged variant enabling the 4-step
# inference used below (TODO confirm against the model card).
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    transformer=QwenImageTransformer2DModel.from_pretrained(
        "linoyts/Qwen-Image-Edit-Rapid-AIO",
        subfolder='transformer',
        torch_dtype=dtype,
        device_map='cuda'
    ),
    torch_dtype=dtype
).to(device)

# Camera-movement ("Multiple angles") LoRA: load it, fuse it into the base
# weights at scale 1.25, then drop the adapter bookkeeping so only the fused
# weights remain at inference time.
pipe.load_lora_weights(
    "dx8152/Qwen-Edit-2509-Multiple-angles",
    weight_name="镜头转换.safetensors",
    adapter_name="angles"
)

pipe.set_adapters(["angles"], adapter_weights=[1.])
pipe.fuse_lora(adapter_names=["angles"], lora_scale=1.25)
pipe.unload_lora_weights()

# Load pre-built ahead-of-time compiled transformer blocks (FlashAttention-3
# variant) published for ZeroGPU Spaces.
spaces.aoti_blocks_load(pipe.transformer, "zerogpu-aoti/Qwen-Image", variant="fa3")

MAX_SEED = np.iinfo(np.int32).max
49
+
50
+
51
def _generate_video_segment(
    input_image_path: str,
    output_image_path: str,
    prompt: str,
    request: gr.Request
) -> str:
    """Generate a single video segment between two frames.

    Delegates to the public ``multimodalart/wan-2-2-first-last-frame`` Space,
    forwarding the caller's ZeroGPU quota token so GPU time is billed to the
    end user rather than this Space.

    Args:
        input_image_path: Path on disk to the start frame.
        output_image_path: Path on disk to the end frame.
        prompt: Text prompt guiding the frame interpolation.
        request: Incoming Gradio request; its ``x-ip-token`` header is passed
            through to the remote Space.
            NOTE(review): this raises KeyError when the header is absent
            (e.g. running outside HF Spaces) — confirm that is intended.

    Returns:
        Local filepath of the video downloaded from the remote Space.
    """
    x_ip_token = request.headers['x-ip-token']
    video_client = Client(
        "multimodalart/wan-2-2-first-last-frame",
        headers={"x-ip-token": x_ip_token}
    )
    result = video_client.predict(
        start_image_pil=handle_file(input_image_path),
        end_image_pil=handle_file(output_image_path),
        prompt=prompt,
        api_name="/generate_video",
    )
    # predict returns a sequence whose first element is a dict containing the
    # "video" filepath.
    return result[0]["video"]
70
+
71
+
72
def build_camera_prompt(
    rotate_deg: float = 0.0,
    move_forward: float = 0.0,
    vertical_tilt: float = 0.0,
    wideangle: bool = False
) -> str:
    """Build a bilingual (zh + en) camera-movement prompt from the UI controls.

    Args:
        rotate_deg: Horizontal rotation in degrees; positive rotates left,
            negative rotates right, 0 adds nothing.
        move_forward: Dolly amount; > 5 requests a close-up, 1..5 a forward
            move, < 1 adds nothing.
        vertical_tilt: <= -1 requests a bird's-eye view, >= 1 a worm's-eye
            view, values in between add nothing.
        wideangle: When True, requests a wide-angle lens.

    Returns:
        The space-joined prompt fragments, or the sentinel string
        "no camera movement" when no control is active.
    """
    prompt_parts = []

    if rotate_deg != 0:
        # Single template for both directions instead of two duplicated
        # f-string branches (the original repeated the whole sentence per side).
        zh_dir, en_dir = ("左", "left") if rotate_deg > 0 else ("右", "right")
        prompt_parts.append(
            f"将镜头向{zh_dir}旋转{abs(rotate_deg)}度 "
            f"Rotate the camera {abs(rotate_deg)} degrees to the {en_dir}."
        )

    if move_forward > 5:
        prompt_parts.append("将镜头转为特写镜头 Turn the camera to a close-up.")
    elif move_forward >= 1:
        prompt_parts.append("将镜头向前移动 Move the camera forward.")

    if vertical_tilt <= -1:
        prompt_parts.append("将相机转向鸟瞰视角 Turn the camera to a bird's-eye view.")
    elif vertical_tilt >= 1:
        prompt_parts.append("将相机切换到仰视视角 Turn the camera to a worm's-eye view.")

    if wideangle:
        prompt_parts.append("将镜头转为广角镜头 Turn the camera to a wide-angle lens.")

    final_prompt = " ".join(prompt_parts).strip()
    return final_prompt if final_prompt else "no camera movement"
107
+
108
+
109
@spaces.GPU
def infer_camera_edit(
    image: Optional[Image.Image] = None,
    rotate_deg: float = 0.0,
    move_forward: float = 0.0,
    vertical_tilt: float = 0.0,
    wideangle: bool = False,
    seed: int = 0,
    randomize_seed: bool = True,
    true_guidance_scale: float = 1.0,
    num_inference_steps: int = 4,
    height: Optional[int] = None,
    width: Optional[int] = None,
    prev_output: Optional[Image.Image] = None,
) -> Tuple[Image.Image, int, str]:
    """Edit the camera angles/view of an image with Qwen Image Edit 2509.

    Builds a camera-movement prompt from the control values, then runs the
    module-level ``pipe`` on the uploaded image (or, failing that, on the
    previous output) and returns the edited image, the seed actually used,
    and the generated prompt.

    Raises:
        gr.Error: When neither ``image`` nor ``prev_output`` provides a frame.
    """
    # Instantiated for its side effect: hooks tqdm so pipeline progress shows
    # in the Gradio UI (the local name itself is unused).
    progress = gr.Progress(track_tqdm=True)

    prompt = build_camera_prompt(rotate_deg, move_forward, vertical_tilt, wideangle)
    print(f"Generated Prompt: {prompt}")

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)

    # Normalize the input into a list of RGB PIL images; the uploaded image
    # takes precedence over the previous output.
    pil_images = []
    if image is not None:
        if isinstance(image, Image.Image):
            pil_images.append(image.convert("RGB"))
        elif hasattr(image, "name"):
            # File-like upload (has a .name path) rather than a PIL image.
            pil_images.append(Image.open(image.name).convert("RGB"))
    elif prev_output:
        pil_images.append(prev_output.convert("RGB"))

    if len(pil_images) == 0:
        raise gr.Error("Please upload an image first.")

    # No active controls: return the input unchanged.
    # NOTE(review): when the frame came from prev_output, `image` is None and
    # is returned as-is here — confirm downstream components accept None.
    if prompt == "no camera movement":
        return image, seed, prompt

    result = pipe(
        image=pil_images,
        prompt=prompt,
        # 0 is the UI sentinel for "auto"; map it to None for the pipeline.
        height=height if height != 0 else None,
        width=width if width != 0 else None,
        num_inference_steps=num_inference_steps,
        generator=generator,
        true_cfg_scale=true_guidance_scale,
        num_images_per_prompt=1,
    ).images[0]

    return result, seed, prompt
161
+
162
+
163
def create_video_between_images(
    input_image: Optional[Image.Image],
    output_image: Optional[np.ndarray],
    prompt: str,
    request: gr.Request
) -> str:
    """Create a short transition video between the input and output images.

    Writes both frames to temporary PNG files, delegates generation to
    ``_generate_video_segment``, and cleans the temporary files up afterwards.

    Args:
        input_image: Start frame as a PIL image.
        output_image: End frame as a numpy array (converted to uint8 PIL).
        prompt: Text prompt for the video; falls back to a generic one.
        request: Incoming Gradio request, forwarded for quota billing.

    Returns:
        Local filepath of the generated video.

    Raises:
        gr.Error: When either frame is missing or generation fails.
    """
    if input_image is None or output_image is None:
        raise gr.Error("Both input and output images are required to create a video.")

    input_image_path = None
    output_image_path = None
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
            input_image.save(tmp.name)
            input_image_path = tmp.name

        output_pil = Image.fromarray(output_image.astype('uint8'))
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
            output_pil.save(tmp.name)
            output_image_path = tmp.name

        video_path = _generate_video_segment(
            input_image_path,
            output_image_path,
            prompt if prompt else "Camera movement transformation",
            request
        )
        return video_path
    except gr.Error:
        # Don't double-wrap errors that already carry a user-facing message
        # (the original re-wrapped gr.Error into another gr.Error).
        raise
    except Exception as e:
        raise gr.Error(f"Video generation failed: {e}")
    finally:
        # The original leaked both delete=False temp files on every call.
        # The remote client uploads the files synchronously inside
        # _generate_video_segment, so removing them here is safe.
        for path in (input_image_path, output_image_path):
            if path is not None:
                try:
                    os.remove(path)
                except OSError:
                    pass
192
+
193
+
194
+ # --- 3D Camera Control Component for 2509 ---
195
+ CAMERA_3D_HTML_TEMPLATE = """
196
+ <div id="camera-control-wrapper" style="width: 100%; height: 400px; position: relative; background: #1a1a1a; border-radius: 12px; overflow: hidden;">
197
+ <div id="prompt-overlay" style="position: absolute; bottom: 10px; left: 50%; transform: translateX(-50%); background: rgba(0,0,0,0.8); padding: 8px 16px; border-radius: 8px; font-family: monospace; font-size: 11px; color: #00ff88; white-space: nowrap; z-index: 10; max-width: 90%; overflow: hidden; text-overflow: ellipsis;"></div>
198
+ <div id="control-legend" style="position: absolute; top: 10px; left: 10px; background: rgba(0,0,0,0.7); padding: 8px 12px; border-radius: 8px; font-family: system-ui; font-size: 11px; color: #fff; z-index: 10;">
199
+ <div style="margin-bottom: 4px;"><span style="color: #00ff88;">●</span> Rotation (↔)</div>
200
+ <div style="margin-bottom: 4px;"><span style="color: #ff69b4;">●</span> Vertical Tilt (↕)</div>
201
+ <div><span style="color: #ffa500;">●</span> Distance/Zoom</div>
202
+ </div>
203
+ </div>
204
+ """
205
+
206
+ CAMERA_3D_JS = """
207
+ (() => {
208
+ const wrapper = element.querySelector('#camera-control-wrapper');
209
+ const promptOverlay = element.querySelector('#prompt-overlay');
210
+
211
+ const initScene = () => {
212
+ if (typeof THREE === 'undefined') {
213
+ setTimeout(initScene, 100);
214
+ return;
215
+ }
216
+
217
+ const scene = new THREE.Scene();
218
+ scene.background = new THREE.Color(0x1a1a1a);
219
+
220
+ const camera = new THREE.PerspectiveCamera(50, wrapper.clientWidth / wrapper.clientHeight, 0.1, 1000);
221
+ camera.position.set(4, 3, 4);
222
+ camera.lookAt(0, 0.75, 0);
223
+
224
+ const renderer = new THREE.WebGLRenderer({ antialias: true });
225
+ renderer.setSize(wrapper.clientWidth, wrapper.clientHeight);
226
+ renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2));
227
+ wrapper.insertBefore(renderer.domElement, wrapper.firstChild);
228
+
229
+ scene.add(new THREE.AmbientLight(0xffffff, 0.6));
230
+ const dirLight = new THREE.DirectionalLight(0xffffff, 0.6);
231
+ dirLight.position.set(5, 10, 5);
232
+ scene.add(dirLight);
233
+
234
+ scene.add(new THREE.GridHelper(6, 12, 0x333333, 0x222222));
235
+
236
+ const CENTER = new THREE.Vector3(0, 0.75, 0);
237
+ const BASE_DISTANCE = 2.0;
238
+ const ROTATION_RADIUS = 2.2;
239
+ const TILT_RADIUS = 1.6;
240
+
241
+ let rotateDeg = props.value?.rotate_deg || 0;
242
+ let moveForward = props.value?.move_forward || 0;
243
+ let verticalTilt = props.value?.vertical_tilt || 0;
244
+ let wideangle = props.value?.wideangle || false;
245
+
246
+ const rotateSteps = [-90, -45, 0, 45, 90];
247
+ const forwardSteps = [0, 5, 10];
248
+ const tiltSteps = [-1, 0, 1];
249
+
250
+ function snapToNearest(value, steps) {
251
+ return steps.reduce((prev, curr) => Math.abs(curr - value) < Math.abs(prev - value) ? curr : prev);
252
+ }
253
+
254
+ function createPlaceholderTexture() {
255
+ const canvas = document.createElement('canvas');
256
+ canvas.width = 256;
257
+ canvas.height = 256;
258
+ const ctx = canvas.getContext('2d');
259
+ ctx.fillStyle = '#3a3a4a';
260
+ ctx.fillRect(0, 0, 256, 256);
261
+ ctx.fillStyle = '#ffcc99';
262
+ ctx.beginPath();
263
+ ctx.arc(128, 128, 80, 0, Math.PI * 2);
264
+ ctx.fill();
265
+ ctx.fillStyle = '#333';
266
+ ctx.beginPath();
267
+ ctx.arc(100, 110, 10, 0, Math.PI * 2);
268
+ ctx.arc(156, 110, 10, 0, Math.PI * 2);
269
+ ctx.fill();
270
+ ctx.strokeStyle = '#333';
271
+ ctx.lineWidth = 3;
272
+ ctx.beginPath();
273
+ ctx.arc(128, 130, 35, 0.2, Math.PI - 0.2);
274
+ ctx.stroke();
275
+ return new THREE.CanvasTexture(canvas);
276
+ }
277
+
278
+ let currentTexture = createPlaceholderTexture();
279
+ const planeMaterial = new THREE.MeshBasicMaterial({ map: currentTexture, side: THREE.DoubleSide });
280
+ let targetPlane = new THREE.Mesh(new THREE.PlaneGeometry(1.2, 1.2), planeMaterial);
281
+ targetPlane.position.copy(CENTER);
282
+ scene.add(targetPlane);
283
+
284
+ function updateTextureFromUrl(url) {
285
+ if (!url) {
286
+ planeMaterial.map = createPlaceholderTexture();
287
+ planeMaterial.needsUpdate = true;
288
+ scene.remove(targetPlane);
289
+ targetPlane = new THREE.Mesh(new THREE.PlaneGeometry(1.2, 1.2), planeMaterial);
290
+ targetPlane.position.copy(CENTER);
291
+ scene.add(targetPlane);
292
+ return;
293
+ }
294
+
295
+ const loader = new THREE.TextureLoader();
296
+ loader.crossOrigin = 'anonymous';
297
+ loader.load(url, (texture) => {
298
+ texture.minFilter = THREE.LinearFilter;
299
+ texture.magFilter = THREE.LinearFilter;
300
+ planeMaterial.map = texture;
301
+ planeMaterial.needsUpdate = true;
302
+
303
+ const img = texture.image;
304
+ if (img && img.width && img.height) {
305
+ const aspect = img.width / img.height;
306
+ const maxSize = 1.4;
307
+ let planeWidth, planeHeight;
308
+ if (aspect > 1) {
309
+ planeWidth = maxSize;
310
+ planeHeight = maxSize / aspect;
311
+ } else {
312
+ planeHeight = maxSize;
313
+ planeWidth = maxSize * aspect;
314
+ }
315
+ scene.remove(targetPlane);
316
+ targetPlane = new THREE.Mesh(new THREE.PlaneGeometry(planeWidth, planeHeight), planeMaterial);
317
+ targetPlane.position.copy(CENTER);
318
+ scene.add(targetPlane);
319
+ }
320
+ });
321
+ }
322
+
323
+ if (props.imageUrl) {
324
+ updateTextureFromUrl(props.imageUrl);
325
+ }
326
+
327
+ const cameraGroup = new THREE.Group();
328
+ const bodyMat = new THREE.MeshStandardMaterial({ color: 0x6699cc, metalness: 0.5, roughness: 0.3 });
329
+ const body = new THREE.Mesh(new THREE.BoxGeometry(0.28, 0.2, 0.35), bodyMat);
330
+ cameraGroup.add(body);
331
+ const lens = new THREE.Mesh(
332
+ new THREE.CylinderGeometry(0.08, 0.1, 0.16, 16),
333
+ new THREE.MeshStandardMaterial({ color: 0x6699cc, metalness: 0.5, roughness: 0.3 })
334
+ );
335
+ lens.rotation.x = Math.PI / 2;
336
+ lens.position.z = 0.24;
337
+ cameraGroup.add(lens);
338
+ scene.add(cameraGroup);
339
+
340
+ const rotationArcPoints = [];
341
+ for (let i = 0; i <= 32; i++) {
342
+ const angle = THREE.MathUtils.degToRad(-90 + (180 * i / 32));
343
+ rotationArcPoints.push(new THREE.Vector3(ROTATION_RADIUS * Math.sin(angle), 0.05, ROTATION_RADIUS * Math.cos(angle)));
344
+ }
345
+ const rotationCurve = new THREE.CatmullRomCurve3(rotationArcPoints);
346
+ const rotationArc = new THREE.Mesh(
347
+ new THREE.TubeGeometry(rotationCurve, 32, 0.035, 8, false),
348
+ new THREE.MeshStandardMaterial({ color: 0x00ff88, emissive: 0x00ff88, emissiveIntensity: 0.3 })
349
+ );
350
+ scene.add(rotationArc);
351
+
352
+ const rotationHandle = new THREE.Mesh(
353
+ new THREE.SphereGeometry(0.16, 16, 16),
354
+ new THREE.MeshStandardMaterial({ color: 0x00ff88, emissive: 0x00ff88, emissiveIntensity: 0.5 })
355
+ );
356
+ rotationHandle.userData.type = 'rotation';
357
+ scene.add(rotationHandle);
358
+
359
+ const tiltArcPoints = [];
360
+ for (let i = 0; i <= 32; i++) {
361
+ const angle = THREE.MathUtils.degToRad(-45 + (90 * i / 32));
362
+ tiltArcPoints.push(new THREE.Vector3(-0.7, TILT_RADIUS * Math.sin(angle) + CENTER.y, TILT_RADIUS * Math.cos(angle)));
363
+ }
364
+ const tiltCurve = new THREE.CatmullRomCurve3(tiltArcPoints);
365
+ const tiltArc = new THREE.Mesh(
366
+ new THREE.TubeGeometry(tiltCurve, 32, 0.035, 8, false),
367
+ new THREE.MeshStandardMaterial({ color: 0xff69b4, emissive: 0xff69b4, emissiveIntensity: 0.3 })
368
+ );
369
+ scene.add(tiltArc);
370
+
371
+ const tiltHandle = new THREE.Mesh(
372
+ new THREE.SphereGeometry(0.16, 16, 16),
373
+ new THREE.MeshStandardMaterial({ color: 0xff69b4, emissive: 0xff69b4, emissiveIntensity: 0.5 })
374
+ );
375
+ tiltHandle.userData.type = 'tilt';
376
+ scene.add(tiltHandle);
377
+
378
+ const distanceLineGeo = new THREE.BufferGeometry();
379
+ const distanceLine = new THREE.Line(distanceLineGeo, new THREE.LineBasicMaterial({ color: 0xffa500 }));
380
+ scene.add(distanceLine);
381
+
382
+ const distanceHandle = new THREE.Mesh(
383
+ new THREE.SphereGeometry(0.16, 16, 16),
384
+ new THREE.MeshStandardMaterial({ color: 0xffa500, emissive: 0xffa500, emissiveIntensity: 0.5 })
385
+ );
386
+ distanceHandle.userData.type = 'distance';
387
+ scene.add(distanceHandle);
388
+
389
+ function buildPromptText(rot, fwd, tilt, wide) {
390
+ const parts = [];
391
+ if (rot !== 0) {
392
+ const dir = rot > 0 ? 'left' : 'right';
393
+ parts.push('Rotate ' + Math.abs(rot) + '° ' + dir);
394
+ }
395
+ if (fwd > 5) parts.push('Close-up');
396
+ else if (fwd >= 1) parts.push('Move forward');
397
+ if (tilt <= -1) parts.push("Bird's-eye");
398
+ else if (tilt >= 1) parts.push("Worm's-eye");
399
+ if (wide) parts.push('Wide-angle');
400
+ return parts.length > 0 ? parts.join(' • ') : 'No camera movement';
401
+ }
402
+
403
+ function updatePositions() {
404
+ const rotRad = THREE.MathUtils.degToRad(-rotateDeg);
405
+ const distance = BASE_DISTANCE - (moveForward / 10) * 1.0;
406
+ // Invert: worm's-eye (1) = camera DOWN, bird's-eye (-1) = camera UP
407
+ const tiltAngle = -verticalTilt * 35;
408
+ const tiltRad = THREE.MathUtils.degToRad(tiltAngle);
409
+
410
+ const camX = distance * Math.sin(rotRad) * Math.cos(tiltRad);
411
+ const camY = distance * Math.sin(tiltRad) + CENTER.y;
412
+ const camZ = distance * Math.cos(rotRad) * Math.cos(tiltRad);
413
+
414
+ cameraGroup.position.set(camX, camY, camZ);
415
+ cameraGroup.lookAt(CENTER);
416
+
417
+ rotationHandle.position.set(ROTATION_RADIUS * Math.sin(rotRad), 0.05, ROTATION_RADIUS * Math.cos(rotRad));
418
+
419
+ const tiltHandleAngle = THREE.MathUtils.degToRad(tiltAngle);
420
+ tiltHandle.position.set(-0.7, TILT_RADIUS * Math.sin(tiltHandleAngle) + CENTER.y, TILT_RADIUS * Math.cos(tiltHandleAngle));
421
+
422
+ const handleDist = distance - 0.4;
423
+ distanceHandle.position.set(
424
+ handleDist * Math.sin(rotRad) * Math.cos(tiltRad),
425
+ handleDist * Math.sin(tiltRad) + CENTER.y,
426
+ handleDist * Math.cos(rotRad) * Math.cos(tiltRad)
427
+ );
428
+ distanceLineGeo.setFromPoints([cameraGroup.position.clone(), CENTER.clone()]);
429
+
430
+ promptOverlay.textContent = buildPromptText(rotateDeg, moveForward, verticalTilt, wideangle);
431
+ }
432
+
433
+ function updatePropsAndTrigger() {
434
+ const rotSnap = snapToNearest(rotateDeg, rotateSteps);
435
+ const fwdSnap = snapToNearest(moveForward, forwardSteps);
436
+ const tiltSnap = snapToNearest(verticalTilt, tiltSteps);
437
+
438
+ props.value = { rotate_deg: rotSnap, move_forward: fwdSnap, vertical_tilt: tiltSnap, wideangle: wideangle };
439
+ trigger('change', props.value);
440
+ }
441
+
442
+ const raycaster = new THREE.Raycaster();
443
+ const mouse = new THREE.Vector2();
444
+ let isDragging = false;
445
+ let dragTarget = null;
446
+ let dragStartMouse = new THREE.Vector2();
447
+ let dragStartForward = 0;
448
+ const intersection = new THREE.Vector3();
449
+
450
+ const canvas = renderer.domElement;
451
+
452
+ canvas.addEventListener('mousedown', (e) => {
453
+ const rect = canvas.getBoundingClientRect();
454
+ mouse.x = ((e.clientX - rect.left) / rect.width) * 2 - 1;
455
+ mouse.y = -((e.clientY - rect.top) / rect.height) * 2 + 1;
456
+
457
+ raycaster.setFromCamera(mouse, camera);
458
+ const intersects = raycaster.intersectObjects([rotationHandle, tiltHandle, distanceHandle]);
459
+
460
+ if (intersects.length > 0) {
461
+ isDragging = true;
462
+ dragTarget = intersects[0].object;
463
+ dragTarget.material.emissiveIntensity = 1.0;
464
+ dragTarget.scale.setScalar(1.3);
465
+ dragStartMouse.copy(mouse);
466
+ dragStartForward = moveForward;
467
+ canvas.style.cursor = 'grabbing';
468
+ }
469
+ });
470
+
471
+ canvas.addEventListener('mousemove', (e) => {
472
+ const rect = canvas.getBoundingClientRect();
473
+ mouse.x = ((e.clientX - rect.left) / rect.width) * 2 - 1;
474
+ mouse.y = -((e.clientY - rect.top) / rect.height) * 2 + 1;
475
+
476
+ if (isDragging && dragTarget) {
477
+ raycaster.setFromCamera(mouse, camera);
478
+
479
+ if (dragTarget.userData.type === 'rotation') {
480
+ const plane = new THREE.Plane(new THREE.Vector3(0, 1, 0), -0.05);
481
+ if (raycaster.ray.intersectPlane(plane, intersection)) {
482
+ let angle = THREE.MathUtils.radToDeg(Math.atan2(intersection.x, intersection.z));
483
+ rotateDeg = THREE.MathUtils.clamp(-angle, -90, 90);
484
+ }
485
+ } else if (dragTarget.userData.type === 'tilt') {
486
+ const plane = new THREE.Plane(new THREE.Vector3(1, 0, 0), 0.7);
487
+ if (raycaster.ray.intersectPlane(plane, intersection)) {
488
+ const relY = intersection.y - CENTER.y;
489
+ const relZ = intersection.z;
490
+ const angle = THREE.MathUtils.radToDeg(Math.atan2(relY, relZ));
491
+ // Invert: drag DOWN = worm's-eye (1), drag UP = bird's-eye (-1)
492
+ verticalTilt = THREE.MathUtils.clamp(-angle / 35, -1, 1);
493
+ }
494
+ } else if (dragTarget.userData.type === 'distance') {
495
+ const deltaY = mouse.y - dragStartMouse.y;
496
+ moveForward = THREE.MathUtils.clamp(dragStartForward + deltaY * 12, 0, 10);
497
+ }
498
+ updatePositions();
499
+ } else {
500
+ raycaster.setFromCamera(mouse, camera);
501
+ const intersects = raycaster.intersectObjects([rotationHandle, tiltHandle, distanceHandle]);
502
+ [rotationHandle, tiltHandle, distanceHandle].forEach(h => {
503
+ h.material.emissiveIntensity = 0.5;
504
+ h.scale.setScalar(1);
505
+ });
506
+ if (intersects.length > 0) {
507
+ intersects[0].object.material.emissiveIntensity = 0.8;
508
+ intersects[0].object.scale.setScalar(1.1);
509
+ canvas.style.cursor = 'grab';
510
+ } else {
511
+ canvas.style.cursor = 'default';
512
+ }
513
+ }
514
+ });
515
+
516
+ const onMouseUp = () => {
517
+ if (dragTarget) {
518
+ dragTarget.material.emissiveIntensity = 0.5;
519
+ dragTarget.scale.setScalar(1);
520
+
521
+ const targetRot = snapToNearest(rotateDeg, rotateSteps);
522
+ const targetFwd = snapToNearest(moveForward, forwardSteps);
523
+ const targetTilt = snapToNearest(verticalTilt, tiltSteps);
524
+
525
+ const startRot = rotateDeg, startFwd = moveForward, startTilt = verticalTilt;
526
+ const startTime = Date.now();
527
+
528
+ function animateSnap() {
529
+ const t = Math.min((Date.now() - startTime) / 200, 1);
530
+ const ease = 1 - Math.pow(1 - t, 3);
531
+
532
+ rotateDeg = startRot + (targetRot - startRot) * ease;
533
+ moveForward = startFwd + (targetFwd - startFwd) * ease;
534
+ verticalTilt = startTilt + (targetTilt - startTilt) * ease;
535
+
536
+ updatePositions();
537
+ if (t < 1) requestAnimationFrame(animateSnap);
538
+ else updatePropsAndTrigger();
539
+ }
540
+ animateSnap();
541
+ }
542
+ isDragging = false;
543
+ dragTarget = null;
544
+ canvas.style.cursor = 'default';
545
+ };
546
+
547
+ canvas.addEventListener('mouseup', onMouseUp);
548
+ canvas.addEventListener('mouseleave', onMouseUp);
549
+
550
+ canvas.addEventListener('touchstart', (e) => {
551
+ e.preventDefault();
552
+ const touch = e.touches[0];
553
+ const rect = canvas.getBoundingClientRect();
554
+ mouse.x = ((touch.clientX - rect.left) / rect.width) * 2 - 1;
555
+ mouse.y = -((touch.clientY - rect.top) / rect.height) * 2 + 1;
556
+
557
+ raycaster.setFromCamera(mouse, camera);
558
+ const intersects = raycaster.intersectObjects([rotationHandle, tiltHandle, distanceHandle]);
559
+
560
+ if (intersects.length > 0) {
561
+ isDragging = true;
562
+ dragTarget = intersects[0].object;
563
+ dragTarget.material.emissiveIntensity = 1.0;
564
+ dragTarget.scale.setScalar(1.3);
565
+ dragStartMouse.copy(mouse);
566
+ dragStartForward = moveForward;
567
+ }
568
+ }, { passive: false });
569
+
570
+ canvas.addEventListener('touchmove', (e) => {
571
+ e.preventDefault();
572
+ const touch = e.touches[0];
573
+ const rect = canvas.getBoundingClientRect();
574
+ mouse.x = ((touch.clientX - rect.left) / rect.width) * 2 - 1;
575
+ mouse.y = -((touch.clientY - rect.top) / rect.height) * 2 + 1;
576
+
577
+ if (isDragging && dragTarget) {
578
+ raycaster.setFromCamera(mouse, camera);
579
+
580
+ if (dragTarget.userData.type === 'rotation') {
581
+ const plane = new THREE.Plane(new THREE.Vector3(0, 1, 0), -0.05);
582
+ if (raycaster.ray.intersectPlane(plane, intersection)) {
583
+ let angle = THREE.MathUtils.radToDeg(Math.atan2(intersection.x, intersection.z));
584
+ rotateDeg = THREE.MathUtils.clamp(-angle, -90, 90);
585
+ }
586
+ } else if (dragTarget.userData.type === 'tilt') {
587
+ const plane = new THREE.Plane(new THREE.Vector3(1, 0, 0), 0.7);
588
+ if (raycaster.ray.intersectPlane(plane, intersection)) {
589
+ const relY = intersection.y - CENTER.y;
590
+ const relZ = intersection.z;
591
+ const angle = THREE.MathUtils.radToDeg(Math.atan2(relY, relZ));
592
+ // Invert: drag DOWN = worm's-eye (1), drag UP = bird's-eye (-1)
593
+ verticalTilt = THREE.MathUtils.clamp(-angle / 35, -1, 1);
594
+ }
595
+ } else if (dragTarget.userData.type === 'distance') {
596
+ const deltaY = mouse.y - dragStartMouse.y;
597
+ moveForward = THREE.MathUtils.clamp(dragStartForward + deltaY * 12, 0, 10);
598
+ }
599
+ updatePositions();
600
+ }
601
+ }, { passive: false });
602
+
603
+ canvas.addEventListener('touchend', (e) => { e.preventDefault(); onMouseUp(); }, { passive: false });
604
+ canvas.addEventListener('touchcancel', (e) => { e.preventDefault(); onMouseUp(); }, { passive: false });
605
+
606
+ updatePositions();
607
+
608
+ function render() {
609
+ requestAnimationFrame(render);
610
+ renderer.render(scene, camera);
611
+ }
612
+ render();
613
+
614
+ new ResizeObserver(() => {
615
+ camera.aspect = wrapper.clientWidth / wrapper.clientHeight;
616
+ camera.updateProjectionMatrix();
617
+ renderer.setSize(wrapper.clientWidth, wrapper.clientHeight);
618
+ }).observe(wrapper);
619
+
620
+ wrapper._updateTexture = updateTextureFromUrl;
621
+
622
+ let lastImageUrl = props.imageUrl;
623
+ let lastValue = JSON.stringify(props.value);
624
+ setInterval(() => {
625
+ if (props.imageUrl !== lastImageUrl) {
626
+ lastImageUrl = props.imageUrl;
627
+ updateTextureFromUrl(props.imageUrl);
628
+ }
629
+ const currentValue = JSON.stringify(props.value);
630
+ if (currentValue !== lastValue) {
631
+ lastValue = currentValue;
632
+ if (props.value && typeof props.value === 'object') {
633
+ rotateDeg = props.value.rotate_deg ?? rotateDeg;
634
+ moveForward = props.value.move_forward ?? moveForward;
635
+ verticalTilt = props.value.vertical_tilt ?? verticalTilt;
636
+ wideangle = props.value.wideangle ?? wideangle;
637
+ updatePositions();
638
+ }
639
+ }
640
+ }, 100);
641
+ };
642
+
643
+ initScene();
644
+ })();
645
+ """
646
+
647
+
648
+ def create_camera_3d_component(value=None, imageUrl=None, **kwargs):
649
+ """Create a 3D camera control component using gr.HTML."""
650
+ if value is None:
651
+ value = {"rotate_deg": 0, "move_forward": 0, "vertical_tilt": 0, "wideangle": False}
652
+
653
+ return gr.HTML(
654
+ value=value,
655
+ html_template=CAMERA_3D_HTML_TEMPLATE,
656
+ js_on_load=CAMERA_3D_JS,
657
+ imageUrl=imageUrl,
658
+ **kwargs
659
+ )
660
+
661
+
662
+ # --- UI ---
663
+ css = '''
664
+ #col-container { max-width: 1100px; margin: 0 auto; }
665
+ .dark .progress-text { color: white !important; }
666
+ #camera-3d-control { min-height: 400px; }
667
+ #examples {
668
+ margin-top: 20px;
669
+ }
670
+ .fillable{max-width: 1200px !important}
671
+ '''
672
+
673
+
674
+ def reset_all() -> list:
675
+ """Reset all camera control knobs and flags to their default values."""
676
+ return [0, 0, 0, False, True]
677
+
678
+
679
+ def end_reset() -> bool:
680
+ """Mark the end of a reset cycle."""
681
+ return False
682
+
683
+
684
+ def update_dimensions_on_upload(image: Optional[Image.Image]) -> Tuple[int, int]:
685
+ """Compute recommended (width, height) for the output resolution."""
686
+ if image is None:
687
+ return 1024, 1024
688
+
689
+ original_width, original_height = image.size
690
+
691
+ if original_width > original_height:
692
+ new_width = 1024
693
+ aspect_ratio = original_height / original_width
694
+ new_height = int(new_width * aspect_ratio)
695
+ else:
696
+ new_height = 1024
697
+ aspect_ratio = original_width / original_height
698
+ new_width = int(new_height * aspect_ratio)
699
+
700
+ new_width = (new_width // 8) * 8
701
+ new_height = (new_height // 8) * 8
702
+
703
+ return new_width, new_height
704
+
705
+
706
+ with gr.Blocks() as demo:
707
+ gr.Markdown("""
708
+ ## 🎬 Qwen Image Edit — Camera Angle Control
709
+
710
+ Qwen Image Edit 2509 for Camera Control ✨
711
+ Using [dx8152's Qwen-Edit-2509-Multiple-angles LoRA](https://huggingface.co/dx8152/Qwen-Edit-2509-Multiple-angles) and [Phr00t/Qwen-Image-Edit-Rapid-AIO](https://huggingface.co/Phr00t/Qwen-Image-Edit-Rapid-AIO/tree/main) for 4-step inference 💨
712
+ """)
713
+
714
+ with gr.Row():
715
+ with gr.Column(scale=1):
716
+
717
+ image = gr.Image(label="Input Image", type="pil", height=280)
718
+ prev_output = gr.Image(value=None, visible=False)
719
+ is_reset = gr.Checkbox(value=False, visible=False)
720
+
721
+ with gr.Tab("🎮 3D Camera Control"):
722
+ # gr.Markdown("*Drag the handles: 🟢 Rotation, 🩷 Tilt, 🟠 Distance*")
723
+
724
+ camera_3d = create_camera_3d_component(
725
+ value={"rotate_deg": 0, "move_forward": 0, "vertical_tilt": 0, "wideangle": False},
726
+ elem_id="camera-3d-control"
727
+ )
728
+ with gr.Tab("🎚️ Slider Controls"):
729
+ rotate_deg = gr.Slider(label="Rotate Right ↔ Left (°)", minimum=-90, maximum=90, step=45, value=0)
730
+ move_forward = gr.Slider(label="Move Forward → Close-Up", minimum=0, maximum=10, step=5, value=0)
731
+ vertical_tilt = gr.Slider(label="Vertical: Bird's-eye ↔ Worm's-eye", minimum=-1, maximum=1, step=1, value=0)
732
+ wideangle = gr.Checkbox(label="🔭 Wide-Angle Lens", value=False)
733
+
734
+ with gr.Row():
735
+ reset_btn = gr.Button("🔄 Reset")
736
+ run_btn = gr.Button("🚀 Generate", variant="primary")
737
+
738
+ with gr.Column(scale=1):
739
+ result = gr.Image(label="Output Image", interactive=False, height=350)
740
+ prompt_preview = gr.Textbox(label="Generated Prompt", interactive=False)
741
+
742
+ create_video_button = gr.Button(
743
+ "🎥 Create Video Between Images",
744
+ variant="secondary",
745
+ visible=False
746
+ )
747
+ with gr.Group(visible=False) as video_group:
748
+ video_output = gr.Video(label="Generated Video", buttons=["download"], autoplay=True)
749
+
750
+
751
+ with gr.Accordion("⚙️ Advanced Settings", open=False):
752
+ seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
753
+ randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
754
+ true_guidance_scale = gr.Slider(label="True Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
755
+ num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=40, step=1, value=4)
756
+ height = gr.Slider(label="Height", minimum=256, maximum=2048, step=8, value=1024)
757
+ width = gr.Slider(label="Width", minimum=256, maximum=2048, step=8, value=1024)
758
+
759
+ # --- Helper Functions ---
760
+ def update_prompt_from_sliders(rotate, forward, tilt, wide):
761
+ return build_camera_prompt(rotate, forward, tilt, wide)
762
+
763
+ def sync_3d_to_sliders(camera_value):
764
+ if camera_value and isinstance(camera_value, dict):
765
+ rot = camera_value.get('rotate_deg', 0)
766
+ fwd = camera_value.get('move_forward', 0)
767
+ tilt = camera_value.get('vertical_tilt', 0)
768
+ wide = camera_value.get('wideangle', False)
769
+ prompt = build_camera_prompt(rot, fwd, tilt, wide)
770
+ return rot, fwd, tilt, wide, prompt
771
+ return gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
772
+
773
+ def sync_sliders_to_3d(rotate, forward, tilt, wide):
774
+ return {"rotate_deg": rotate, "move_forward": forward, "vertical_tilt": tilt, "wideangle": wide}
775
+
776
+ def update_3d_image(img):
777
+ if img is None:
778
+ return gr.update(imageUrl=None)
779
+ buffered = BytesIO()
780
+ img.save(buffered, format="PNG")
781
+ img_str = base64.b64encode(buffered.getvalue()).decode()
782
+ data_url = f"data:image/png;base64,{img_str}"
783
+ return gr.update(imageUrl=data_url)
784
+
785
+ # Define inputs/outputs
786
+ inputs = [image, rotate_deg, move_forward, vertical_tilt, wideangle, seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output]
787
+ outputs = [result, seed, prompt_preview]
788
+ control_inputs = [image, rotate_deg, move_forward, vertical_tilt, wideangle, seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output]
789
+ control_inputs_with_flag = [is_reset] + control_inputs
790
+
791
+ def maybe_infer(is_reset_val: bool, progress: gr.Progress = gr.Progress(track_tqdm=True), *args: Any):
792
+ if is_reset_val:
793
+ return gr.update(), gr.update(), gr.update(), gr.update()
794
+ result_img, result_seed, result_prompt = infer_camera_edit(*args)
795
+ show_button = args[0] is not None and result_img is not None
796
+ return result_img, result_seed, result_prompt, gr.update(visible=show_button)
797
+
798
+ # --- Event Handlers ---
799
+
800
+ # Slider -> Prompt preview
801
+ for slider in [rotate_deg, move_forward, vertical_tilt]:
802
+ slider.change(fn=update_prompt_from_sliders, inputs=[rotate_deg, move_forward, vertical_tilt, wideangle], outputs=[prompt_preview])
803
+ wideangle.change(fn=update_prompt_from_sliders, inputs=[rotate_deg, move_forward, vertical_tilt, wideangle], outputs=[prompt_preview])
804
+
805
+ # 3D control -> Sliders + Prompt + Inference
806
+ camera_3d.change(
807
+ fn=sync_3d_to_sliders,
808
+ inputs=[camera_3d],
809
+ outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, prompt_preview]
810
+ ).then(
811
+ fn=maybe_infer,
812
+ inputs=control_inputs_with_flag,
813
+ outputs=outputs + [create_video_button]
814
+ )
815
+
816
+ # Sliders -> 3D control
817
+ for slider in [rotate_deg, move_forward, vertical_tilt]:
818
+ slider.release(fn=sync_sliders_to_3d, inputs=[rotate_deg, move_forward, vertical_tilt, wideangle], outputs=[camera_3d])
819
+ wideangle.input(fn=sync_sliders_to_3d, inputs=[rotate_deg, move_forward, vertical_tilt, wideangle], outputs=[camera_3d])
820
+
821
+ # Reset
822
+ reset_btn.click(fn=reset_all, inputs=None, outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, is_reset], queue=False
823
+ ).then(fn=end_reset, inputs=None, outputs=[is_reset], queue=False
824
+ ).then(fn=sync_sliders_to_3d, inputs=[rotate_deg, move_forward, vertical_tilt, wideangle], outputs=[camera_3d])
825
+
826
+ # Generate button
827
+ def infer_and_show_video_button(*args: Any):
828
+ result_img, result_seed, result_prompt = infer_camera_edit(*args)
829
+ show_button = args[0] is not None and result_img is not None
830
+ return result_img, result_seed, result_prompt, gr.update(visible=show_button)
831
+
832
+ run_event = run_btn.click(fn=infer_and_show_video_button, inputs=inputs, outputs=outputs + [create_video_button])
833
+
834
+ # Video creation
835
+ create_video_button.click(fn=lambda: gr.update(visible=True), outputs=[video_group], api_visibility="private"
836
+ ).then(fn=create_video_between_images, inputs=[image, result, prompt_preview], outputs=[video_output], api_visibility="private")
837
+
838
+ # Image upload
839
+ image.upload(fn=update_dimensions_on_upload, inputs=[image], outputs=[width, height]
840
+ ).then(fn=reset_all, inputs=None, outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, is_reset], queue=False
841
+ ).then(fn=end_reset, inputs=None, outputs=[is_reset], queue=False
842
+ ).then(fn=update_3d_image, inputs=[image], outputs=[camera_3d])
843
+
844
+ image.clear(fn=lambda: gr.update(imageUrl=None), outputs=[camera_3d])
845
+
846
+ run_event.then(lambda img, *_: img, inputs=[result], outputs=[prev_output])
847
+
848
+
849
+ gr.Examples(
850
+ examples=[
851
+ ["tool_of_the_sea.png", 90, 0, 0, False, 0, True, 1.0, 4, 568, 1024],
852
+ ["monkey.jpg", -90, 0, 0, False, 0, True, 1.0, 4, 704, 1024],
853
+ ["metropolis.jpg", 0, 0, -1, False, 0, True, 1.0, 4, 816, 1024],
854
+ ["disaster_girl.jpg", -45, 0, 1, False, 0, True, 1.0, 4, 768, 1024],
855
+ ["grumpy.png", 90, 0, 1, False, 0, True, 1.0, 4, 576, 1024]
856
+ ],
857
+ inputs=[image, rotate_deg, move_forward, vertical_tilt, wideangle, seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width],
858
+ outputs=outputs,
859
+ fn=infer_camera_edit,
860
+ cache_examples=True,
861
+ cache_mode="lazy",
862
+ elem_id="examples"
863
+ )
864
+
865
+ gr.api(infer_camera_edit, api_name="infer_edit_camera_angles")
866
+ gr.api(create_video_between_images, api_name="create_video_between_images")
867
+
868
+ head = '<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>'
869
+ demo.launch(mcp_server=True, css=css, theme=gr.themes.Citrus(), head=head, footer_links=["api", "gradio", "settings"])
c64a8665df602aaf6708ab5ec026b54e32bfffec ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Qwen Image Edit Camera Control
3
+ emoji: 🎬
4
+ colorFrom: indigo
5
+ colorTo: pink
6
+ sdk: gradio
7
+ sdk_version: 6.2.0
8
+ app_file: app.py
9
+ pinned: true
10
+ license: apache-2.0
11
+ short_description: Fast 4 step inference with Qwen Image Edit 2509
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
e49ad4d8e649dc6e8f38356dc7b3ea1de5a3c112b58a61ed321f6c107810a93d ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e49ad4d8e649dc6e8f38356dc7b3ea1de5a3c112b58a61ed321f6c107810a93d
3
+ size 3325557
fe60fdf8b8807c0059e888c42d86c919781a1be3 ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ git+https://github.com/huggingface/diffusers.git
2
+
3
+
4
+
5
+ transformers
6
+ accelerate
7
+ safetensors
8
+ sentencepiece
9
+ dashscope
10
+ kernels
11
+ torchvision
12
+ peft
13
+ torchao==0.11.0
14
+ torch==2.8