import os
import gradio as gr
import numpy as np
import torch
import random
from PIL import Image
from typing import Iterable
from gradio.themes import Soft
from gradio.themes.utils import colors, fonts, sizes
# --- Handle optional 'spaces' import for local compatibility ---
try:
import spaces
except ImportError:
class spaces:
@staticmethod
def GPU(duration=30):
def decorator(func):
return func
return decorator
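# With the stub above, @spaces.GPU(...) becomes a no-op decorator, so the
# app still runs outside Hugging Face Spaces (e.g. on a local machine).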
# --- Custom Theme Setup (Steel Blue) ---
colors.steel_blue = colors.Color(
name="steel_blue",
c50="#EBF3F8",
c100="#D3E5F0",
c200="#A8CCE1",
c300="#7DB3D2",
c400="#529AC3",
c500="#4682B4",
c600="#3E72A0",
c700="#36638C",
c800="#2E5378",
c900="#264364",
c950="#1E3450",
)
class SteelBlueTheme(Soft):
def __init__(
self,
*,
primary_hue: colors.Color | str = colors.gray,
secondary_hue: colors.Color | str = colors.steel_blue,
neutral_hue: colors.Color | str = colors.slate,
text_size: sizes.Size | str = sizes.text_lg,
font: fonts.Font | str | Iterable[fonts.Font | str] = (
fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
),
font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
),
):
super().__init__(
primary_hue=primary_hue,
secondary_hue=secondary_hue,
neutral_hue=neutral_hue,
text_size=text_size,
font=font,
font_mono=font_mono,
)
self.set(
background_fill_primary="*primary_50",
background_fill_primary_dark="*primary_900",
body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
button_primary_text_color="white",
button_primary_text_color_hover="white",
button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_800)",
button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_500)",
button_secondary_text_color="black",
button_secondary_text_color_hover="white",
button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
slider_color="*secondary_500",
slider_color_dark="*secondary_600",
block_title_text_weight="600",
block_border_width="3px",
block_shadow="*shadow_drop_lg",
button_primary_shadow="*shadow_drop_lg",
button_large_padding="11px",
color_accent_soft="*primary_100",
block_label_background_fill="*primary_200",
)
steel_blue_theme = SteelBlueTheme()
# --- Hardware Setup ---
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
print("torch.__version__ =", torch.__version__)
print("cuda available:", torch.cuda.is_available())
if torch.cuda.is_available():
print("current device:", torch.cuda.current_device())
print("device name:", torch.cuda.get_device_name(torch.cuda.current_device()))
print("Using device:", device)
# --- Imports for Custom Pipeline ---
from diffusers import FlowMatchEulerDiscreteScheduler
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
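# Note: qwenimage is assumed to be a local package bundled with this Space,
# providing the custom Edit-Plus pipeline, transformer, and FlashAttention-3
# attention processor used below.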
dtype = torch.bfloat16
# --- Load Pipeline with Rapid-AIO Transformer (Fast Version) ---
pipe = QwenImageEditPlusPipeline.from_pretrained(
"Qwen/Qwen-Image-Edit-2509",
transformer=QwenImageTransformer2DModel.from_pretrained(
"linoyts/Qwen-Image-Edit-Rapid-AIO",
subfolder='transformer',
torch_dtype=dtype,
device_map='cuda'
),
torch_dtype=dtype
).to(device)
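# The stock transformer is replaced with the Rapid-AIO checkpoint, a fast
# all-in-one variant intended for few-step inference; this is why the UI
# below defaults to 4 steps and a guidance scale of 1.0.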
# --- Load Fusion/Texture/Face-Swap LoRAs ---
print("Loading LoRA adapters...")
# 1. Texture Edit
pipe.load_lora_weights("tarn59/apply_texture_qwen_image_edit_2509",
weight_name="apply_texture_v2_qwen_image_edit_2509.safetensors",
adapter_name="texture-edit")
# 2. Fuse Objects (the weight file name "溶图" is Chinese for "image blending/fusion")
pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Fusion",
weight_name="溶图.safetensors",
adapter_name="fuse-objects")
# 3. Face Swap
pipe.load_lora_weights("Alissonerdx/BFS-Best-Face-Swap",
weight_name="bfs_face_v1_qwen_image_edit_2509.safetensors",
adapter_name="face-swap")
# Attempt to set Flash Attention 3
try:
pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
print("Flash Attention 3 Processor set successfully.")
except Exception as e:
print(f"Could not set FA3 processor (likely hardware mismatch): {e}. Using default attention.")
MAX_SEED = np.iinfo(np.int32).max
def update_dimensions_on_upload(image):
if image is None:
return 1024, 1024
original_width, original_height = image.size
if original_width > original_height:
new_width = 1024
aspect_ratio = original_height / original_width
new_height = int(new_width * aspect_ratio)
else:
new_height = 1024
aspect_ratio = original_width / original_height
new_width = int(new_height * aspect_ratio)
# Ensure dimensions are multiples of 16 (safer for transformers)
new_width = (new_width // 16) * 16
new_height = (new_height // 16) * 16
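    # e.g. a 4000x3000 landscape upload becomes 1024x768: both sides are
    # already multiples of 16, so the floor-division rounding is a no-op here.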
return new_width, new_height
@spaces.GPU(duration=30)
def infer(
input_gallery_items,
prompt,
lora_adapter,
seed,
randomize_seed,
guidance_scale,
steps,
progress=gr.Progress(track_tqdm=True)
):
"""
Input:
input_gallery_items: Since type="pil", this is a List[Tuple[PIL.Image, str]] or List[PIL.Image]
"""
if not input_gallery_items:
raise gr.Error("Please upload an image to edit.")
    # Take the first gallery item; it may be an (image, caption) tuple
    # or a bare image depending on how the gallery was populated.
first_item = input_gallery_items[0]
if isinstance(first_item, tuple):
# Format is (PIL.Image, Caption)
input_pil = first_item[0]
else:
# Format is PIL.Image directly
input_pil = first_item
# Map Dropdown choices to internal Adapter names
adapters_map = {
"Texture Edit": "texture-edit",
"Fuse-Objects": "fuse-objects",
"Face-Swap": "face-swap",
}
active_adapter = adapters_map.get(lora_adapter)
    # set_adapters() replaces the active adapter set, so selecting one
    # adapter implicitly deactivates the others.
if active_adapter:
pipe.set_adapters([active_adapter], adapter_weights=[1.0])
else:
pipe.set_adapters([], adapter_weights=[])
if randomize_seed:
seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device=device).manual_seed(seed)
negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
original_image = input_pil.convert("RGB")
width, height = update_dimensions_on_upload(original_image)
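    # Note: in the Qwen-Image pipelines, true classifier-free guidance only
    # engages when true_cfg_scale > 1.0; at the default of 1.0 the negative
    # prompt has no effect, which suits the few-step Rapid-AIO transformer.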
result = pipe(
image=original_image,
prompt=prompt,
negative_prompt=negative_prompt,
height=height,
width=width,
num_inference_steps=steps,
generator=generator,
true_cfg_scale=guidance_scale,
).images[0]
return result, seed
@spaces.GPU(duration=30)
def infer_example(input_gallery_items, prompt, lora_adapter):
    # Thin wrapper for gr.Examples: with type="pil", Gradio converts the
    # example file paths into the same gallery structure that infer()
    # expects, so we simply delegate with fast default settings.
    if not input_gallery_items:
        return None, 0
return infer(
input_gallery_items,
prompt,
lora_adapter,
seed=0,
randomize_seed=True,
guidance_scale=1.0,
steps=4
)
css="""
#col-container {
margin: 0 auto;
max-width: 960px;
}
#main-title h1 {font-size: 2.1em !important;}
"""
with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown("# **Qwen-Image-Edit-2509-LoRAs-Fast-Fusion**", elem_id="main-title")
gr.Markdown("Perform advanced image manipulation including Texture editing, Object Fusion, and Face Swapping using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2509) adapters.")
with gr.Row(equal_height=True):
with gr.Column():
                # A Gallery input supports multi-image workflows (e.g. a
                # texture reference alongside the target image).
input_image = gr.Gallery(
label="Input Images",
show_label=False,
type="pil",
interactive=True,
height=290,
columns=1
)
prompt = gr.Text(
label="Edit Prompt",
show_label=True,
placeholder="e.g., Change the material to wooden texture...",
)
run_button = gr.Button("Edit Image", variant="primary")
with gr.Column():
output_image = gr.Image(label="Output Image", interactive=False, format="png", height=350)
with gr.Row():
lora_adapter = gr.Dropdown(
label="Choose Editing Style",
choices=["Texture Edit", "Fuse-Objects", "Face-Swap"],
value="Texture Edit"
)
with gr.Accordion("Advanced Settings", open=False, visible=False):
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)
gr.Examples(
examples=[
# Format: [ [Image_List], Prompt, Adapter ]
[
["examples/texture_sample.jpg", "examples/texture_sample.2jpg"],
"Change the material of the object to rusted metal texture.",
"Texture Edit"
],
[
["examples/fusion_sample.jpg"],
"Fuse the product naturally into the background.",
"Fuse-Objects"
],
[
["examples/face_sample.jpg"],
"Swap the face with a cyberpunk robot face.",
"Face-Swap"
],
],
inputs=[input_image, prompt, lora_adapter],
outputs=[output_image, seed],
fn=infer_example,
cache_examples=False,
label="Examples (Ensure images exist in 'examples/' folder)"
)
run_button.click(
fn=infer,
inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
outputs=[output_image, seed]
)
demo.launch(ssr_mode=False, show_error=True)