import os
import math
import random
import tempfile

import numpy as np
import torch
import spaces
import gradio as gr
from gradio_client import Client, handle_file
from PIL import Image
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
from diffusers import FlowMatchEulerDiscreteScheduler

from optimization import optimize_pipeline_
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    transformer=QwenImageTransformer2DModel.from_pretrained(
        "linoyts/Qwen-Image-Edit-Rapid-AIO",
        subfolder="transformer",
        torch_dtype=dtype,
        device_map="cuda",
    ),
    torch_dtype=dtype,
).to(device)

# Load the photo-to-anime LoRA, bake it into the base weights, then drop the
# adapter bookkeeping so inference runs on the fused transformer.
pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime", adapter_name="anime")
pipe.set_adapters(["anime"], adapter_weights=[1.0])
pipe.fuse_lora(adapter_names=["anime"], lora_scale=1.0)
pipe.unload_lora_weights()

# Swap in the FlashAttention-3 attention processor and warm up / compile the
# pipeline once with dummy inputs.
pipe.transformer.__class__ = QwenImageTransformer2DModel
pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
optimize_pipeline_(
    pipe,
    image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))],
    prompt="prompt",
)

MAX_SEED = np.iinfo(np.int32).max
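# NOTE (editor's assumption): `import spaces` implies this Space targets
# ZeroGPU, where the GPU-bound entry point is normally wrapped with the
# `spaces.GPU` decorator; the decorator does not appear in this extract.
# A sketch of the usual pattern, applied to the inference function below:
#
#   @spaces.GPU(duration=60)
#   def convert_to_anime(...):
#       ...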
def _generate_video_segment(input_image_path: str, output_image_path: str, prompt: str, request: gr.Request) -> str:
    """Generates a single video segment using the external service."""
    x_ip_token = request.headers["x-ip-token"]
    video_client = Client("multimodalart/wan-2-2-first-last-frame", headers={"x-ip-token": x_ip_token})
    result = video_client.predict(
        start_image_pil=handle_file(input_image_path),
        end_image_pil=handle_file(output_image_path),
        prompt=prompt,
        api_name="/generate_video",
    )
    return result[0]["video"]
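# Minimal usage sketch (editor's assumption: the paths and the gr.Request come
# from a click handler; "before.png" / "after.png" are hypothetical files):
#
#   def on_create_video(request: gr.Request):
#       return _generate_video_segment(
#           "before.png", "after.png",
#           "smooth transition between the two frames", request)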
def convert_to_anime(
    image,
    seed,
    randomize_seed,
    true_guidance_scale,
    num_inference_steps,
    height,
    width,
    progress=gr.Progress(track_tqdm=True),
):
    prompt = "Convert this photo to anime style"
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)

    # Accept either a PIL image or an uploaded temp file.
    pil_images = []
    if image is not None:
        if isinstance(image, Image.Image):
            pil_images.append(image.convert("RGB"))
        elif hasattr(image, "name"):
            pil_images.append(Image.open(image.name).convert("RGB"))
    if len(pil_images) == 0:
        raise gr.Error("Please upload an image first.")

    result = pipe(
        image=pil_images,
        prompt=prompt,
        height=height if height != 0 else None,
        width=width if width != 0 else None,
        num_inference_steps=num_inference_steps,
        generator=generator,
        true_cfg_scale=true_guidance_scale,
        num_images_per_prompt=1,
    ).images[0]
    return result, seed
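# Quick smoke test outside the UI (editor's sketch; "photo.jpg" is a
# hypothetical local file, and the dimensions come from
# update_dimensions_on_upload defined further down):
#
#   img = Image.open("photo.jpg")
#   w, h = update_dimensions_on_upload(img)
#   anime_img, used_seed = convert_to_anime(
#       img, seed=0, randomize_seed=True, true_guidance_scale=1.0,
#       num_inference_steps=4, height=h, width=w)
#   anime_img.save("anime.png")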
# --- UI ---
css = '''
#col-container {
    max-width: 900px;
    margin: 0 auto;
    padding: 2rem;
    font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
}
.gradio-container {
    background: linear-gradient(to bottom, #f5f5f7, #ffffff);
}
#title {
    text-align: center;
    font-size: 2.5rem;
    font-weight: 600;
    color: #1d1d1f;
    margin-bottom: 0.5rem;
}
#description {
    text-align: center;
    font-size: 1.1rem;
    color: #6e6e73;
    margin-bottom: 2rem;
}
.image-container {
    border-radius: 18px;
    overflow: hidden;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.07);
}
#convert-btn {
    background: linear-gradient(180deg, #0071e3 0%, #0077ed 100%);
    border: none;
    border-radius: 12px;
    color: white;
    font-size: 1.1rem;
    font-weight: 500;
    padding: 0.75rem 2rem;
    transition: all 0.3s ease;
}
#convert-btn:hover {
    transform: translateY(-2px);
    box-shadow: 0 8px 16px rgba(0, 113, 227, 0.3);
}
'''
def update_dimensions_on_upload(image):
    if image is None:
        return 1024, 1024
    original_width, original_height = image.size
    # Scale the long edge to 1024 while preserving the aspect ratio.
    if original_width > original_height:
        new_width = 1024
        aspect_ratio = original_height / original_width
        new_height = int(new_width * aspect_ratio)
    else:
        new_height = 1024
        aspect_ratio = original_width / original_height
        new_width = int(new_height * aspect_ratio)
    # Ensure dimensions are multiples of 8
    new_width = (new_width // 8) * 8
    new_height = (new_height // 8) * 8
    return new_width, new_height
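# ---------------------------------------------------------------------------
# NOTE (editor's reconstruction): the extracted listing jumps from the helper
# above straight into event wiring that references a gr.Blocks UI, an
# `infer_camera_edit` function, and several controls that are never defined
# in the extract. Everything from here down to the `gr.Examples(...)` call is
# a minimal, hedged sketch: the component names are taken from the wiring
# below, but the layout, labels, defaults, and the body of `infer_camera_edit`
# are assumptions, not the original code.
# ---------------------------------------------------------------------------

def infer_camera_edit(image, rotate_deg, move_forward, vertical_tilt, wideangle,
                      seed, randomize_seed, true_guidance_scale,
                      num_inference_steps, height, width, prev_output=None,
                      progress=gr.Progress(track_tqdm=True)):
    """Sketch only: the original body (building a camera-movement prompt from
    the control values and calling `pipe`) is missing from the extract."""
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Placeholder pass-through so the wiring below can be exercised.
    source = prev_output if prev_output is not None else image
    return source, seed, "camera edit prompt"

def reset_all():
    # Sketch: neutral control values plus a raised reset flag.
    return 0, 0, 0, False, True

def end_reset():
    # Sketch: lower the reset flag once the controls have settled.
    return False

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        is_reset = gr.State(False)
        prev_output = gr.State(None)
        with gr.Row():
            image = gr.Image(type="pil", label="Input Image")
            result = gr.Image(label="Result")
        rotate_deg = gr.Slider(-180, 180, value=0, step=1, label="Rotate (degrees)")
        move_forward = gr.Slider(-10, 10, value=0, step=1, label="Move forward")
        vertical_tilt = gr.Slider(-90, 90, value=0, step=1, label="Vertical tilt")
        wideangle = gr.Checkbox(value=False, label="Wide angle")
        with gr.Accordion("Advanced", open=False):
            seed = gr.Slider(0, MAX_SEED, value=0, step=1, label="Seed")
            randomize_seed = gr.Checkbox(value=True, label="Randomize seed")
            true_guidance_scale = gr.Slider(1.0, 10.0, value=1.0, label="True guidance scale")
            num_inference_steps = gr.Slider(1, 50, value=4, step=1, label="Inference steps")
            height = gr.Slider(0, 2048, value=1024, step=8, label="Height")
            width = gr.Slider(0, 2048, value=1024, step=8, label="Width")
        run_btn = gr.Button("Run", elem_id="convert-btn")
        create_video_button = gr.Button("Create video", visible=False)
        used_seed = gr.Number(label="Seed used", interactive=False)
        prompt_preview = gr.Textbox(label="Prompt", interactive=False)

    outputs = [result, used_seed, prompt_preview]
    run_event = run_btn.click(
        fn=infer_camera_edit,
        inputs=[image, rotate_deg, move_forward, vertical_tilt, wideangle,
                seed, randomize_seed, true_guidance_scale,
                num_inference_steps, height, width],
        outputs=outputs,
    )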
| ["tool_of_the_sea.png", 90, 0, 0, False, 0, True, 1.0, 4, 568, 1024], | |
| ["monkey.jpg", -90, 0, 0, False, 0, True, 1.0, 4, 704, 1024], | |
| ["metropolis.jpg", 0, 0, -1, False, 0, True, 1.0, 4, 816, 1024], | |
| ["disaster_girl.jpg", -45, 0, 1, False, 0, True, 1.0, 4, 768, 1024], | |
| ["grumpy.png", 90, 0, 1, False, 0, True, 1.0, 4, 576, 1024] | |
| ], | |
| inputs=[image,rotate_deg, move_forward, | |
| vertical_tilt, wideangle, | |
| seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width], | |
| outputs=outputs, | |
| fn=infer_camera_edit, | |
| cache_examples="lazy", | |
| elem_id="examples" | |
| ) | |
    # Image upload triggers dimension update and control reset
    image.upload(
        fn=update_dimensions_on_upload,
        inputs=[image],
        outputs=[width, height],
    ).then(
        fn=reset_all,
        inputs=None,
        outputs=[rotate_deg, move_forward, vertical_tilt, wideangle, is_reset],
        queue=False,
    ).then(
        fn=end_reset,
        inputs=None,
        outputs=[is_reset],
        queue=False,
    )
    # Live updates: skip inference while a reset is in flight
    def maybe_infer(is_reset, progress=gr.Progress(track_tqdm=True), *args):
        if is_reset:
            return gr.update(), gr.update(), gr.update(), gr.update()
        else:
            result_img, result_seed, result_prompt = infer_camera_edit(*args)
            # Show the video button only if we have both an input and an output
            show_button = args[0] is not None and result_img is not None
            return result_img, result_seed, result_prompt, gr.update(visible=show_button)
    control_inputs = [
        image, rotate_deg, move_forward,
        vertical_tilt, wideangle,
        seed, randomize_seed, true_guidance_scale,
        num_inference_steps, height, width, prev_output,
    ]
    control_inputs_with_flag = [is_reset] + control_inputs

    for control in [rotate_deg, move_forward, vertical_tilt]:
        control.release(fn=maybe_infer, inputs=control_inputs_with_flag,
                        outputs=outputs + [create_video_button])
    wideangle.input(fn=maybe_infer, inputs=control_inputs_with_flag,
                    outputs=outputs + [create_video_button])

    # Remember the latest result so subsequent edits can start from it
    run_event.then(lambda img, *_: img, inputs=[result], outputs=[prev_output])

demo.launch()