|
|
import torch |
|
|
from PIL import Image |
|
|
import gradio as gr |
|
|
import spaces |
|
|
|
|
|
from diffusers import QwenImageEditPipeline |
|
|
|
|
|
|
|
|
# Select the accelerator if one is visible; fall back to CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# bfloat16 keeps memory low on GPU; CPU inference requires float32.
DTYPE = torch.bfloat16 if DEVICE == "cuda" else torch.float32


# Module-level load: the pipeline is downloaded/instantiated once at import
# time so every request reuses the same weights.
print("Loading Qwen/Qwen-Image-Edit pipeline...")

pipe = QwenImageEditPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit",
    torch_dtype=DTYPE
).to(DEVICE)

# disable=None leaves the progress bar at the library default behavior.
pipe.set_progress_bar_config(disable=None)
|
|
|
|
|
|
|
|
# Hugging Face repo and weight file of the try-on LoRA adapter that is
# loaded on top of the base pipeline for each generation request.
LORA_REPO = "FoxBaze/Try_On_Qwen_Edit_Lora_Alpha"

LORA_WEIGHT_NAME = "Try_On_Qwen_Edit_Lora.safetensors"


# Prompt templates. The LoRA expects a stacked input: subject photo on top,
# garment items on the bottom; the prompt only varies by subject gender.
PROMPT_WOMAN = (
    "Style the woman in the top of the image, with every article of clothing on the bottom"
)

PROMPT_MAN = (
    "Style the man in the top of the image, with every article of clothing on the bottom"
)
|
|
|
|
|
@spaces.GPU()
def try_on(model_image, garments_image, gender):
    """Run the virtual try-on edit and return the styled image.

    Args:
        model_image: numpy array of the person photo (top half of the
            stacked input), as delivered by ``gr.Image(type="numpy")``.
        garments_image: numpy array of the clothing items (bottom half).
        gender: ``"woman"`` or ``"man"`` — selects the prompt variant.

    Returns:
        PIL.Image.Image: the first generated image.

    Raises:
        gr.Error: if either input image is missing.
    """
    if model_image is None or garments_image is None:
        raise gr.Error("Please upload both a model image and a garments image.")

    model_pil = Image.fromarray(model_image).convert("RGB")
    garments_pil = Image.fromarray(garments_image).convert("RGB")

    # BUG FIX: diffusers' load_lora_weights takes the singular keywords
    # `weight_name` / `adapter_name`; the previous plural spellings were
    # not valid parameters of the API.
    pipe.load_lora_weights(
        LORA_REPO,
        weight_name=LORA_WEIGHT_NAME,
        adapter_name="tryon",
    )

    prompt = PROMPT_WOMAN if gender == "woman" else PROMPT_MAN

    inputs = {
        "image": [model_pil, garments_pil],
        "prompt": prompt,
        # Fixed seed for reproducible outputs across requests.
        "generator": torch.manual_seed(0),
        "true_cfg_scale": 4.0,
        "negative_prompt": " ",
        "num_inference_steps": 30,
    }

    try:
        with torch.inference_mode():
            result = pipe(**inputs)
    finally:
        # Always detach the LoRA, even when generation fails, so a failed
        # request does not leave the adapter loaded for the next one.
        pipe.delete_adapters("tryon")

    return result.images[0]
|
|
|
|
|
|
|
|
# UI definition: two stacked input images + a gender radio on the left,
# the generated result on the right.
with gr.Blocks() as demo:
    gr.Markdown(
        """
    # 👗 Qwen Try-On (FoxBaze LoRA Alpha)

    Upload a **model photo** on the top, and a **garments image** on the bottom
    (one subject on top row, multiple clothing items on bottom row).
    The app will style the subject with all garments.
    """
    )

    with gr.Row():
        with gr.Column():
            # type="numpy" matches what try_on expects (Image.fromarray).
            model_image = gr.Image(
                label="Top image: Model (full body preferred)",
                type="numpy"
            )
            garments_image = gr.Image(
                label="Bottom image: Garments (multiple pieces)",
                type="numpy"
            )
            # Drives the prompt choice inside try_on (woman vs man wording).
            gender = gr.Radio(
                ["woman", "man"],
                value="woman",
                label="Subject gender (for prompt tuning)",
            )
            run_btn = gr.Button("Generate Try-On")

        with gr.Column():
            # Output only; users cannot edit the result in place.
            output_image = gr.Image(
                label="Result",
                interactive=False
            )

    # Wire the button to the inference function.
    run_btn.click(
        fn=try_on,
        inputs=[model_image, garments_image, gender],
        outputs=[output_image],
    )
|
|
|
|
|
# Start the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()