File size: 3,414 Bytes
46e91d6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import torch
from PIL import Image
import gradio as gr
import spaces

from diffusers import QwenImageEditPipeline

# Pick the device: CUDA when available, otherwise CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# bfloat16 on GPU for speed/memory; float32 on CPU where bf16 is poorly supported.
DTYPE = torch.bfloat16 if DEVICE == "cuda" else torch.float32

# Load the base Qwen Image Edit pipeline (heavy: downloads/loads model weights at import time).
print("Loading Qwen/Qwen-Image-Edit pipeline...")
pipe = QwenImageEditPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit",
    torch_dtype=DTYPE
).to(DEVICE)
pipe.set_progress_bar_config(disable=None)

# LoRA repo on the Hub and the weight filename inside it (FoxBaze try-on adapter).
LORA_REPO = "FoxBaze/Try_On_Qwen_Edit_Lora_Alpha"
LORA_WEIGHT_NAME = "Try_On_Qwen_Edit_Lora.safetensors"

# Prompt variants used by the LoRA; selected per subject gender in try_on().
PROMPT_WOMAN = (
    "Style the woman in the top of the image, with every article of clothing on the bottom"
)
PROMPT_MAN = (
    "Style the man in the top of the image, with every article of clothing on the bottom"
)

@spaces.GPU()
def try_on(model_image, garments_image, gender):
    """Run the try-on edit: dress the person (top image) in the garments (bottom image).

    Args:
        model_image: numpy array of the person photo (Gradio ``type="numpy"``).
        garments_image: numpy array of the garments photo.
        gender: ``"woman"`` or ``"man"`` — selects the prompt variant.

    Returns:
        PIL.Image.Image: the styled output image.

    Raises:
        gr.Error: if either input image is missing.
    """
    if model_image is None or garments_image is None:
        raise gr.Error("Please upload both a model image and a garments image.")

    # Gradio delivers numpy arrays; convert to RGB PIL images for the pipeline.
    model_pil = Image.fromarray(model_image).convert("RGB")
    garments_pil = Image.fromarray(garments_image).convert("RGB")

    # FIX: diffusers' load_lora_weights takes singular `weight_name` /
    # `adapter_name`; the previous plural kwargs (`weight_names`,
    # `adapter_names`) are not part of the API, so the intended weight file
    # and adapter label were never applied.
    pipe.load_lora_weights(
        LORA_REPO,
        weight_name=LORA_WEIGHT_NAME,
        adapter_name="tryon",
    )

    try:
        prompt = PROMPT_WOMAN if gender == "woman" else PROMPT_MAN

        inputs = {
            "image": [model_pil, garments_pil],  # top = person, bottom = garments
            "prompt": prompt,
            "generator": torch.manual_seed(0),  # fixed seed -> reproducible output
            "true_cfg_scale": 4.0,
            "negative_prompt": " ",
            "num_inference_steps": 30,
        }

        with torch.inference_mode():
            result = pipe(**inputs)
    finally:
        # Always remove the adapter, even on failure: otherwise the next
        # request's load_lora_weights would collide on the "tryon" name and
        # adapters would accumulate in memory between requests.
        pipe.delete_adapters("tryon")

    return result.images[0]


# Build the Gradio UI: inputs (two images + gender radio) on the left,
# result image on the right; the button triggers try_on().
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # 👗 Qwen Try-On (FoxBaze LoRA Alpha)

        Upload a **model photo** on the top, and a **garments image** on the bottom
        (one subject on top row, multiple clothing items on bottom row).
        The app will style the subject with all garments.
        """
    )

    with gr.Row():
        with gr.Column():
            # type="numpy" so try_on() receives numpy arrays (converted to PIL there).
            model_image = gr.Image(
                label="Top image: Model (full body preferred)",
                type="numpy"
            )
            garments_image = gr.Image(
                label="Bottom image: Garments (multiple pieces)",
                type="numpy"
            )
            gender = gr.Radio(
                ["woman", "man"],
                value="woman",
                label="Subject gender (for prompt tuning)",
            )
            run_btn = gr.Button("Generate Try-On")

        with gr.Column():
            # Output is display-only; users cannot edit the generated image.
            output_image = gr.Image(
                label="Result",
                interactive=False
            )

    # Wire the button click to the inference function.
    run_btn.click(
        fn=try_on,
        inputs=[model_image, garments_image, gender],
        outputs=[output_image],
    )

if __name__ == "__main__":
    demo.launch()