# utils/editor.py
import io
import math
import contextlib
from typing import Tuple, Dict, Any
from PIL import Image

import torch
from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler
from transformers import logging as hf_logging
hf_logging.set_verbosity_error()

# auxiliary detector used to generate an OpenPose-style pose map
from controlnet_aux import OpenposeDetector

# background removal for the garment image (extracts RGBA with alpha)
from rembg import remove

# default parameters (adjust as needed)
MODEL_ID = "runwayml/stable-diffusion-v1-5"  # Stable Diffusion v1.5 base
CONTROLNET_ID = "lllyasviel/sd-controlnet-openpose"  # OpenPose ControlNet
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# pipeline cache globals
_PIPELINE = None
_OP_DETECTOR = None

def get_openpose_detector():
    global _OP_DETECTOR
    if _OP_DETECTOR is None:
        _OP_DETECTOR = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
    return _OP_DETECTOR

def load_pipeline():
    """
    Loads the ControlNet + Stable Diffusion img2img pipeline (half precision when possible).
    """
    global _PIPELINE
    if _PIPELINE is not None:
        return _PIPELINE

    dtype = torch.float16 if DEVICE == "cuda" else torch.float32
    # load the ControlNet weights
    controlnet = ControlNetModel.from_pretrained(CONTROLNET_ID, torch_dtype=dtype)
    # load the SD + ControlNet pipeline; the img2img variant is the one that accepts
    # the 'image' + 'control_image' + 'strength' call made in run_pipeline below
    pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
        MODEL_ID,
        controlnet=controlnet,
        safety_checker=None,
        torch_dtype=dtype,
    )
    # use the UniPC scheduler -- better speed/quality trade-off
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    if DEVICE == "cuda":
        pipe.enable_attention_slicing()  # saves VRAM
        pipe.to("cuda")
    else:
        pipe.to("cpu")

    # autocast is handled at inference time in run_pipeline
    _PIPELINE = pipe
    return _PIPELINE
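
# Optional VRAM savers (a hedged sketch, not wired into load_pipeline): both are
# standard diffusers APIs, but enable_model_cpu_offload requires `accelerate` and
# enable_xformers_memory_efficient_attention requires `xformers` to be installed.
#   pipe = load_pipeline()
#   pipe.enable_model_cpu_offload()
#   pipe.enable_xformers_memory_efficient_attention()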

def remove_background(pil_img: Image.Image) -> Image.Image:
    """
    Removes the background from the garment image using rembg (returns RGBA with alpha).
    """
    # rembg expects raw bytes
    img_bytes = io.BytesIO()
    pil_img.convert("RGBA").save(img_bytes, format="PNG")
    img_bytes = img_bytes.getvalue()
    out = remove(img_bytes)
    # out is the bytes of a PNG with an alpha channel
    out_img = Image.open(io.BytesIO(out)).convert("RGBA")
    return out_img
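
# Example usage (hedged; "shirt.png" is a placeholder path, not part of this repo):
#   garment = Image.open("shirt.png")
#   garment_rgba = remove_background(garment)  # RGBA, background made transparent
#   garment_rgba.save("shirt_cutout.png")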

def simple_align_garment_to_model(model_img: Image.Image, garment_rgba: Image.Image, pose_keypoints=None) -> Image.Image:
    """
    Performs a simple alignment: scales the garment by the (estimated) shoulder distance
    and pastes it onto the model roughly over the torso. Returns an RGBA image (with the model).
    This is only the initialization -- SD + ControlNet does the refinement.
    """
    model = model_img.convert("RGBA")
    g = garment_rgba

    Wm, Hm = model.size
    Wg, Hg = g.size

    # fallback: center the garment when no keypoints are available
    if pose_keypoints is None:
        # scale to half the model's width
        target_w = int(Wm * 0.5)
        scale = target_w / Wg
        new_size = (max(1, int(Wg * scale)), max(1, int(Hg * scale)))
        g_resized = g.resize(new_size, resample=Image.LANCZOS)
        pos = ((Wm - new_size[0]) // 2, int(Hm * 0.28))  # 28% from the top as a rough torso position
        canvas = model.copy()
        canvas.paste(g_resized, pos, g_resized)
        return canvas

    # if keypoints are available, try to use the shoulders for sizing
    try:
        # keypoints: dict mapping names -> (x, y) in pixel coords (as returned below)
        ls = pose_keypoints.get("left_shoulder")
        rs = pose_keypoints.get("right_shoulder")
        if ls and rs:
            shoulder_dist = math.hypot(rs[0] - ls[0], rs[1] - ls[1])
            # the garment should cover ~1.4x the shoulder width (tune per garment type)
            target_w = int(shoulder_dist * 1.4)
            scale = max(0.1, target_w / Wg)
            new_size = (max(1, int(Wg * scale)), max(1, int(Hg * scale)))
            g_resized = g.resize(new_size, resample=Image.LANCZOS)
            # center horizontally between the shoulders, slightly below the shoulder line
            center_x = int((ls[0] + rs[0]) / 2)
            top_y = int((ls[1] + rs[1]) / 1.8)
            pos = (max(0, center_x - new_size[0] // 2), max(0, top_y - new_size[1] // 6))
            canvas = model.copy()
            canvas.paste(g_resized, pos, g_resized)
            return canvas
    except Exception:
        pass

    # fallback to simple centering
    return simple_align_garment_to_model(model_img, garment_rgba, pose_keypoints=None)
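
# Example usage (hedged sketch; the coordinates are illustrative, not real detections):
#   kps = {"left_shoulder": (210, 180), "right_shoulder": (330, 185)}
#   composite = simple_align_garment_to_model(model_img, garment_rgba, pose_keypoints=kps)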

def extract_pose_and_keypoints(model_img: Image.Image) -> Tuple[Image.Image, Dict[str, Tuple[int, int]]]:
    """
    Uses controlnet_aux.OpenposeDetector to generate the pose map (image) and tries to
    return useful keypoints (shoulders). keypoints dict = {"left_shoulder": (x, y), ...}
    """
    detector = get_openpose_detector()
    try:
        # generate the pose map; the detector is called directly and returns a PIL.Image
        pose_image = detector(model_img)
        pose_image = pose_image.convert("RGB")

        # Keypoint extraction depends on the controlnet_aux version: recent releases do
        # not expose the raw keypoints easily, so we skip it for now and alignment
        # falls back to simple centering.
        keypoints = {}

        return pose_image, keypoints

    except Exception:
        # fallback: return a blank pose map and empty keypoints
        blank = Image.new("RGB", model_img.size, (255, 255, 255))
        return blank, {}
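
# Example usage (hedged; "model.jpg" is a placeholder path): inspect the conditioning
# image that ControlNet will receive.
#   pose_map, kps = extract_pose_and_keypoints(Image.open("model.jpg"))
#   pose_map.save("pose_debug.png")  # kps is {} until keypoint extraction is wired up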

def run_pipeline(model_image: Image.Image, garment_image: Image.Image, prompt_extra: str = "") -> Tuple[Image.Image, Dict[str, Any]]:
    """
    Main entry point that:
    1) extracts the pose (pose_map)
    2) removes the garment's background and aligns it naively
    3) builds an initial image (init_image) with the garment over the model (RGBA)
    4) calls Stable Diffusion + ControlNet (image2image) using pose_map as the conditioning image
    Returns: pil_image_result, info_dict
    """
    # resize so the larger side is at most 768 px, balancing quality and VRAM
    max_side = 768
    model_img = model_image.convert("RGB")
    W, H = model_img.size
    scale = max_side / max(W, H) if max(W, H) > max_side else 1.0
    if scale != 1.0:
        model_img = model_img.resize((int(W * scale), int(H * scale)), Image.LANCZOS)

    # garment: remove the background to get an alpha channel
    garment_rgba = remove_background(garment_image)

    # get the pose map and shoulder keypoints
    pose_map, keypoints = extract_pose_and_keypoints(model_img)

    # roughly align the garment onto the model
    init_composite = simple_align_garment_to_model(model_img, garment_rgba, pose_keypoints=keypoints)

    # prepare the pipeline and control image
    pipe = load_pipeline()

    # build the prompt: combine prompt_extra with a basic default garment description
    prompt = ("photo-realistic fashion try-on, ultra detailed, high resolution, realistic lighting. "
              + (prompt_extra or "garment applied on person, preserve texture and zippers, realistic folds."))

    # convert images to the formats the pipeline expects
    init_image = init_composite.convert("RGB")
    control_image = pose_map.convert("RGB")

    # inference parameters (tune these if you hit OOM)
    num_inference_steps = 20
    guidance_scale = 7.5
    strength = 0.75  # image2image strength (how much the init image may change)

    # random seed; pass a fixed seed here instead to make results reproducible
    generator = torch.Generator(device=DEVICE).manual_seed(torch.randint(0, 2**31 - 1, (1,)).item())

    # Note: keyword names differ across diffusers versions ('image'/'control_image' vs
    # the older 'init_image'/'controlnet_conditioning_image'), so call defensively.
    device = DEVICE
    pipe.to(device)

    # fp16 autocast on GPU; a no-op context on CPU
    autocast_ctx = torch.autocast(device_type="cuda") if device == "cuda" else contextlib.nullcontext()

    try:
        # current diffusers: the img2img ControlNet pipeline takes 'image' (init image)
        # and 'control_image' (the conditioning pose map)
        with autocast_ctx:
            out = pipe(
                prompt=prompt,
                image=init_image,
                control_image=control_image,
                num_inference_steps=num_inference_steps,
                guidance_scale=guidance_scale,
                strength=strength,
                generator=generator,
            )
            # out.images is a list
            result_img = out.images[0]
    except TypeError:
        # older diffusers releases used a different signature; try the alternate call
        out = pipe(
            prompt=prompt,
            init_image=init_image,
            controlnet_conditioning_image=control_image,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            strength=strength,
            generator=generator,
        )
        result_img = out.images[0]

    info = {
        "model_id": MODEL_ID,
        "controlnet_id": CONTROLNET_ID,
        "steps": num_inference_steps,
        "guidance_scale": guidance_scale,
        "strength": strength
    }
    return result_img, info
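
if __name__ == "__main__":
    # Minimal smoke test (a hedged sketch): "model.jpg" and "garment.png" are
    # placeholder paths, not files shipped with this module.
    model = Image.open("model.jpg")
    garment = Image.open("garment.png")
    result, info = run_pipeline(model, garment, prompt_extra="red cotton t-shirt")
    print(info)
    result.save("tryon_result.png")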