import spaces
import math
import gradio as gr
import numpy as np
import torch
import safetensors.torch as sf
import db_examples  # not referenced below; safe to drop if db_examples.py is absent

from PIL import Image
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
from diffusers import AutoencoderKL, UNet2DConditionModel, DDIMScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler
from diffusers.models.attention_processor import AttnProcessor2_0
from transformers import CLIPTextModel, CLIPTokenizer
from briarmbg import BriaRMBG
from enum import Enum
import random
import requests

# Model setup: Realistic Vision v5.1 (SD 1.5) components plus BriaRMBG for background removal
sd15_name = 'stablediffusionapi/realistic-vision-v51'
tokenizer = CLIPTokenizer.from_pretrained(sd15_name, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(sd15_name, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(sd15_name, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(sd15_name, subfolder="unet")
rmbg = BriaRMBG.from_pretrained("briaai/RMBG-1.4")

# Expand the UNet input from 4 to 8 channels so foreground latents can be
# concatenated with the noise latents; the new weights start at zero, so the
# pretrained behavior is unchanged until the IC-Light offsets are merged in.
with torch.no_grad():
    new_conv_in = torch.nn.Conv2d(8, unet.conv_in.out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding)
    new_conv_in.weight.zero_()
    new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight)
    new_conv_in.bias = unet.conv_in.bias
    unet.conv_in = new_conv_in

unet_original_forward = unet.forward
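# Keep a reference to the original forward; the hook below pulls 'concat_conds'
# (the VAE-encoded foreground latents) out of cross_attention_kwargs, repeats
# them to match the batch size, and concatenates them channel-wise with the
# noisy sample (4 + 4 = 8 channels) before calling the original forward.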

def hooked_unet_forward(sample, timestep, encoder_hidden_states, **kwargs):
    c_concat = kwargs['cross_attention_kwargs']['concat_conds'].to(sample)
    c_concat = torch.cat([c_concat] * (sample.shape[0] // c_concat.shape[0]), dim=0)
    new_sample = torch.cat([sample, c_concat], dim=1)
    kwargs['cross_attention_kwargs'] = {}
    return unet_original_forward(new_sample, timestep, encoder_hidden_states, **kwargs)

unet.forward = hooked_unet_forward

# Merge the IC-Light "fc" (foreground-conditioned) offset weights into the UNet
model_path = './models/iclight_sd15_fc.safetensors'
sd_offset = sf.load_file(model_path)
sd_origin = unet.state_dict()
sd_merged = {k: sd_origin[k] + sd_offset[k] for k in sd_origin.keys()}
unet.load_state_dict(sd_merged, strict=True)
del sd_offset, sd_origin, sd_merged

# Device setup: half precision for the diffusion components, fp32 for matting
device = torch.device('cuda')
text_encoder = text_encoder.to(device=device, dtype=torch.float16)
vae = vae.to(device=device, dtype=torch.bfloat16)
unet = unet.to(device=device, dtype=torch.float16)
rmbg = rmbg.to(device=device, dtype=torch.float32)

# Use PyTorch 2.0 scaled dot-product attention
unet.set_attn_processor(AttnProcessor2_0())
vae.set_attn_processor(AttnProcessor2_0())

# Samplers
ddim_scheduler = DDIMScheduler(
    num_train_timesteps=1000,
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    clip_sample=False,
    set_alpha_to_one=False,
    steps_offset=1,
)

euler_a_scheduler = EulerAncestralDiscreteScheduler(
    num_train_timesteps=1000,
    beta_start=0.00085,
    beta_end=0.012,
    steps_offset=1
)

dpmpp_2m_sde_karras_scheduler = DPMSolverMultistepScheduler(
    num_train_timesteps=1000,
    beta_start=0.00085,
    beta_end=0.012,
    algorithm_type="sde-dpmsolver++",
    use_karras_sigmas=True,
    steps_offset=1
)

# Pipelines: text-to-image for the first pass, img2img for refinement; both
# share the same components and the DPM++ 2M SDE Karras scheduler.
t2i_pipe = StableDiffusionPipeline(
    vae=vae,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    unet=unet,
    scheduler=dpmpp_2m_sde_karras_scheduler,
    safety_checker=None,
    requires_safety_checker=False,
    feature_extractor=None,
    image_encoder=None
)

i2i_pipe = StableDiffusionImg2ImgPipeline(
    vae=vae,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    unet=unet,
    scheduler=dpmpp_2m_sde_karras_scheduler,
    safety_checker=None,
    requires_safety_checker=False,
    feature_extractor=None,
    image_encoder=None
)

# Translate Albanian prompts to English via an external HF Space endpoint
# (two attempts, 5-second timeout each)
@spaces.GPU
def translate_albanian_to_english(text):
    if not text.strip():
        return ""
    for attempt in range(2):
        try:
            response = requests.post(
                "https://hal1993-mdftranslation1234567890abcdef1234567890-fc073a6.hf.space/v1/translate",
                json={"from_language": "sq", "to_language": "en", "input_text": text},
                headers={"accept": "application/json", "Content-Type": "application/json"},
                timeout=5
            )
            response.raise_for_status()
            translated = response.json().get("translate", "")
            return translated
        except Exception as e:
            if attempt == 1:
                raise gr.Error(f"Përkthimi dështoi: {str(e)}")
    raise gr.Error("Përkthimi dështoi. Ju lutem provoni përsëri.")

# Core processing functions
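# encode_prompt_inner handles prompts longer than CLIP's 77-token window by
# splitting them into 75-token chunks wrapped with BOS/EOS and padding the
# last chunk with EOS.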
@torch.inference_mode()
def encode_prompt_inner(txt: str):
    max_length = tokenizer.model_max_length
    chunk_length = tokenizer.model_max_length - 2
    id_start = tokenizer.bos_token_id
    id_end = tokenizer.eos_token_id
    id_pad = id_end

    def pad(x, p, i):
        return x[:i] if len(x) >= i else x + [p] * (i - len(x))

    tokens = tokenizer(txt, truncation=False, add_special_tokens=False)["input_ids"]
    chunks = [[id_start] + tokens[i: i + chunk_length] + [id_end] for i in range(0, len(tokens), chunk_length)]
    chunks = [pad(ck, id_pad, max_length) for ck in chunks]

    token_ids = torch.tensor(chunks).to(device=device, dtype=torch.int64)
    conds = text_encoder(token_ids).last_hidden_state
    return conds
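
# encode_prompt_pair repeats the shorter of the two embeddings so positive and
# negative prompts cover the same number of chunks, then flattens the chunks
# into a single long embedding sequence for classifier-free guidance.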

@torch.inference_mode()
def encode_prompt_pair(positive_prompt, negative_prompt):
    c = encode_prompt_inner(positive_prompt)
    uc = encode_prompt_inner(negative_prompt)

    c_len = float(len(c))
    uc_len = float(len(uc))
    max_count = max(c_len, uc_len)
    c_repeat = int(math.ceil(max_count / c_len))
    uc_repeat = int(math.ceil(max_count / uc_len))
    max_chunk = max(len(c), len(uc))

    c = torch.cat([c] * c_repeat, dim=0)[:max_chunk]
    uc = torch.cat([uc] * uc_repeat, dim=0)[:max_chunk]

    c = torch.cat([p[None, ...] for p in c], dim=1)
    uc = torch.cat([p[None, ...] for p in uc], dim=1)
    return c, uc
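
# pytorch2numpy / numpy2pytorch convert between [-1, 1] NCHW tensors and
# HWC images (uint8 when quantized, float32 otherwise).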

@torch.inference_mode()
def pytorch2numpy(imgs, quant=True):
    results = []
    for x in imgs:
        y = x.movedim(0, -1)
        if quant:
            y = y * 127.5 + 127.5
            y = y.detach().float().cpu().numpy().clip(0, 255).astype(np.uint8)
        else:
            y = y * 0.5 + 0.5
            y = y.detach().float().cpu().numpy().clip(0, 1).astype(np.float32)
        results.append(y)
    return results

@torch.inference_mode()
def numpy2pytorch(imgs):
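    # Dividing by 127.0 (not 127.5) makes pixel value 127 map exactly to 0.0,
    # matching the neutral gray used for compositing in run_rmbg.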
    h = torch.from_numpy(np.stack(imgs, axis=0)).float() / 127.0 - 1.0
    h = h.movedim(-1, 1)
    return h

def resize_and_center_crop(image, target_width, target_height):
    pil_image = Image.fromarray(image)
    original_width, original_height = pil_image.size
    scale_factor = max(target_width / original_width, target_height / original_height)
    resized_width = int(round(original_width * scale_factor))
    resized_height = int(round(original_height * scale_factor))
    resized_image = pil_image.resize((resized_width, resized_height), Image.LANCZOS)
    left = (resized_width - target_width) / 2
    top = (resized_height - target_height) / 2
    right = (resized_width + target_width) / 2
    bottom = (resized_height + target_height) / 2
    cropped_image = resized_image.crop((left, top, right, bottom))
    return np.array(cropped_image)

def resize_without_crop(image, target_width, target_height):
    pil_image = Image.fromarray(image)
    resized_image = pil_image.resize((target_width, target_height), Image.LANCZOS)
    return np.array(resized_image)
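
# run_rmbg predicts a foreground alpha matte with BriaRMBG at roughly one
# megapixel (sides rounded to multiples of 64), upsamples the matte to the
# input resolution, and composites the subject over neutral gray (127).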

@torch.inference_mode()
def run_rmbg(img, sigma=0.0):
    H, W, C = img.shape
    assert C == 3
    k = (256.0 / float(H * W)) ** 0.5
    feed = resize_without_crop(img, int(64 * round(W * k)), int(64 * round(H * k)))
    feed = numpy2pytorch([feed]).to(device=device, dtype=torch.float32)
    alpha = rmbg(feed)[0][0]
    alpha = torch.nn.functional.interpolate(alpha, size=(H, W), mode="bilinear")
    alpha = alpha.movedim(1, -1)[0]
    alpha = alpha.detach().float().cpu().numpy().clip(0, 1)
    result = 127 + (img.astype(np.float32) - 127 + sigma) * alpha
    return result.clip(0, 255).astype(np.uint8), alpha
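
# process runs relighting in two passes: a low-resolution pass (text-to-image,
# or img2img over the gradient background), then upscaling and a high-resolution
# img2img refinement, both conditioned on the encoded foreground latents.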

@torch.inference_mode()
def process(input_fg, prompt, image_width, image_height, num_samples, seed, steps, a_prompt, n_prompt, cfg, highres_scale, highres_denoise, lowres_denoise, bg_source):
    if input_fg is None:
        raise gr.Error("Ju lutem ngarkoni një imazh.")

    bg_source = BGSource(bg_source)
    input_bg = None

    # For a chosen light direction, synthesize a linear luminance gradient
    # (bright side = 255) to serve as the low-resolution background.
    if bg_source == BGSource.NONE:
        pass
    elif bg_source == BGSource.LEFT:
        gradient = np.linspace(255, 0, image_width)
        image = np.tile(gradient, (image_height, 1))
        input_bg = np.stack((image,) * 3, axis=-1).astype(np.uint8)
    elif bg_source == BGSource.RIGHT:
        gradient = np.linspace(0, 255, image_width)
        image = np.tile(gradient, (image_height, 1))
        input_bg = np.stack((image,) * 3, axis=-1).astype(np.uint8)
    elif bg_source == BGSource.TOP:
        gradient = np.linspace(255, 0, image_height)[:, None]
        image = np.tile(gradient, (1, image_width))
        input_bg = np.stack((image,) * 3, axis=-1).astype(np.uint8)
    elif bg_source == BGSource.BOTTOM:
        gradient = np.linspace(0, 255, image_height)[:, None]
        image = np.tile(gradient, (1, image_width))
        input_bg = np.stack((image,) * 3, axis=-1).astype(np.uint8)
    else:
        raise gr.Error("Preferenca e ndriçimit është e pavlefshme!")

    if seed == -1:
        seed = random.randint(0, 2**32 - 1)

    rng = torch.Generator(device=device).manual_seed(int(seed))

    try:
        fg = resize_and_center_crop(input_fg, image_width, image_height)
        concat_conds = numpy2pytorch([fg]).to(device=vae.device, dtype=vae.dtype)
        concat_conds = vae.encode(concat_conds).latent_dist.mode() * vae.config.scaling_factor

        conds, unconds = encode_prompt_pair(positive_prompt=prompt + ', ' + a_prompt, negative_prompt=n_prompt)

        if input_bg is None:
            latents = t2i_pipe(
                prompt_embeds=conds,
                negative_prompt_embeds=unconds,
                width=image_width,
                height=image_height,
                num_inference_steps=steps,
                num_images_per_prompt=num_samples,
                generator=rng,
                output_type='latent',
                guidance_scale=cfg,
                cross_attention_kwargs={'concat_conds': concat_conds},
            ).images.to(vae.dtype) / vae.config.scaling_factor
        else:
            bg = resize_and_center_crop(input_bg, image_width, image_height)
            bg_latent = numpy2pytorch([bg]).to(device=vae.device, dtype=vae.dtype)
            bg_latent = vae.encode(bg_latent).latent_dist.mode() * vae.config.scaling_factor
            latents = i2i_pipe(
                image=bg_latent,
                strength=lowres_denoise,
                prompt_embeds=conds,
                negative_prompt_embeds=unconds,
                width=image_width,
                height=image_height,
                num_inference_steps=int(round(steps / lowres_denoise)),
                num_images_per_prompt=num_samples,
                generator=rng,
                output_type='latent',
                guidance_scale=cfg,
                cross_attention_kwargs={'concat_conds': concat_conds},
            ).images.to(vae.dtype) / vae.config.scaling_factor

        pixels = vae.decode(latents).sample
        pixels = pytorch2numpy(pixels)
        pixels = [resize_without_crop(
            image=p,
            target_width=int(round(image_width * highres_scale / 64.0) * 64),
            target_height=int(round(image_height * highres_scale / 64.0) * 64))
        for p in pixels]

        pixels = numpy2pytorch(pixels).to(device=vae.device, dtype=vae.dtype)
        latents = vae.encode(pixels).latent_dist.mode() * vae.config.scaling_factor
        latents = latents.to(device=unet.device, dtype=unet.dtype)

        # Recover pixel dimensions from the 8x-downsampled latent grid.
        image_height, image_width = latents.shape[2] * 8, latents.shape[3] * 8

        fg = resize_and_center_crop(input_fg, image_width, image_height)
        concat_conds = numpy2pytorch([fg]).to(device=vae.device, dtype=vae.dtype)
        concat_conds = vae.encode(concat_conds).latent_dist.mode() * vae.config.scaling_factor

        latents = i2i_pipe(
            image=latents,
            strength=highres_denoise,
            prompt_embeds=conds,
            negative_prompt_embeds=unconds,
            width=image_width,
            height=image_height,
            num_inference_steps=int(round(steps / highres_denoise)),
            num_images_per_prompt=num_samples,
            generator=rng,
            output_type='latent',
            guidance_scale=cfg,
            cross_attention_kwargs={'concat_conds': concat_conds},
        ).images.to(vae.dtype) / vae.config.scaling_factor

        pixels = vae.decode(latents).sample
        results = pytorch2numpy(pixels)
        return results[0]  # Return the first image; the UI fixes num_samples at 1
    except Exception as e:
        raise gr.Error(f"Gabim gjatë përpunimit të imazhit: {str(e)}")

@spaces.GPU
@torch.inference_mode()
def process_relight(input_fg, prompt, image_width, image_height, num_samples, seed, steps, a_prompt, n_prompt, cfg, highres_scale, highres_denoise, lowres_denoise, bg_source):
    if input_fg is None:
        raise gr.Error("Ju lutem ngarkoni një imazh.")

    # Translate Albanian prompt to English
    prompt_english = translate_albanian_to_english(prompt.strip()) if prompt.strip() else ""

    # Run background removal
    input_fg, matting = run_rmbg(input_fg)

    # Process the image
    result = process(input_fg, prompt_english, image_width, image_height, num_samples, seed, steps, a_prompt, n_prompt, cfg, highres_scale, highres_denoise, lowres_denoise, bg_source)
    
    return result

# Light-direction options; the labels are shown in Albanian in the UI
class BGSource(Enum):
    NONE = "Asnjë"
    LEFT = "Dritë nga e Majta"
    RIGHT = "Dritë nga e Djathta"
    TOP = "Dritë nga Sipër"
    BOTTOM = "Dritë nga Poshtë"

# Map the aspect-ratio choice to the hidden width/height sliders; with the
# rounding below, "9:16" yields 512x896 and "16:9" yields 896x512.
def update_aspect_ratio(ratio):
    if ratio == "1:1":
        return 640, 640
    elif ratio == "9:16":
        width = 512
        height = int(round(512 * 16 / 9 / 64)) * 64  # Round to nearest multiple of 64
        return width, height
    elif ratio == "16:9":
        width = int(round(512 * 16 / 9 / 64)) * 64  # Round to nearest multiple of 64
        height = 512
        return width, height
    return 640, 640  # Default to 1:1

# UI Layout
def create_demo():
    with gr.Blocks() as block:
        # CSS: 320px top gap, hidden fullscreen/share buttons, a scaled-up
        # download button, and a width-constrained container
        gr.HTML("""
        <style>
        body::before {
            content: "";
            display: block;
            height: 320px;
            background-color: var(--body-background-fill);
        }
        button[aria-label="Fullscreen"], button[aria-label="Fullscreen"]:hover {
            display: none !important;
            visibility: hidden !important;
            opacity: 0 !important;
            pointer-events: none !important;
        }
        button[aria-label="Share"], button[aria-label="Share"]:hover {
            display: none !important;
        }
        button[aria-label="Download"] {
            transform: scale(3);
            transform-origin: top right;
            margin: 0 !important;
            padding: 6px !important;
        }
        .constrained-container {
            max-width: 600px; /* Limits container width */
            margin: 0 auto; /* Centers the container */
        }
        </style>
        """)

        gr.Markdown("# Rindriço Imazhin")
        gr.Markdown("Rindriço imazhin duke ndryshuar ndriçimin e sfondit bazuar në përshkrimin e dhënë")

        with gr.Row():
            with gr.Column(elem_classes="constrained-container"):
                input_fg = gr.Image(sources=["upload"], type="numpy", label="Imazhi i Ngarkuar", height=480, width=480)
                prompt = gr.Textbox(label="Përshkrimi", placeholder="Shkruani përshkrimin këtu")
                bg_source = gr.Radio(choices=[e.value for e in BGSource], value=BGSource.NONE.value, label="Preferenca e Ndriçimit", type='value')
                aspect_ratio = gr.Radio(choices=["9:16", "1:1", "16:9"], value="1:1", label="Raporti i Aspektit")
                relight_button = gr.Button(value="Rindriço")
                result_image = gr.Image(label="Rezultati", type="numpy", height=480, width=480, elem_classes="constrained-container")
                # Hidden components for other parameters
                image_width = gr.Slider(label="Gjerësia e Imazhit", minimum=256, maximum=1024, value=640, step=64, visible=False)
                image_height = gr.Slider(label="Lartësia e Imazhit", minimum=256, maximum=1024, value=640, step=64, visible=False)
                num_samples = gr.Slider(label="Numri i Imazheve", minimum=1, maximum=12, value=1, step=1, visible=False)
                seed = gr.Number(label="Fara", value=-1, precision=0, visible=False)
                steps = gr.Slider(label="Hapat", minimum=1, maximum=100, value=50, step=1, visible=False)
                a_prompt = gr.Textbox(label="Përshkrim i Shtuar", value='best quality', visible=False)
                n_prompt = gr.Textbox(label="Përshkrim Negativ", value='lowres, bad anatomy, bad hands, cropped, worst quality', visible=False)
                cfg = gr.Slider(label="Shkalla CFG", minimum=1.0, maximum=32.0, value=2, step=0.01, visible=False)
                highres_scale = gr.Slider(label="Shkalla e Rezolutës së Lartë", minimum=1.0, maximum=3.0, value=2, step=0.01, visible=False)
                highres_denoise = gr.Slider(label="Denoise i Rezolutës së Lartë", minimum=0.1, maximum=1.0, value=0.5, step=0.01, visible=False)
                lowres_denoise = gr.Slider(label="Denoise i Rezolutës së Ulët", minimum=0.1, maximum=1.0, value=0.9, step=0.01, visible=False)

        # Update hidden sliders based on aspect ratio
        aspect_ratio.change(
            fn=update_aspect_ratio,
            inputs=[aspect_ratio],
            outputs=[image_width, image_height]
        )

        # Bind the relight button
        ips = [
            input_fg, prompt, image_width, image_height, num_samples, seed, steps,
            a_prompt, n_prompt, cfg, highres_scale, highres_denoise, lowres_denoise, bg_source
        ]
        relight_button.click(fn=process_relight, inputs=ips, outputs=result_image)

    return block
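
# A minimal programmatic usage sketch (not part of the app). It assumes the
# Spaces GPU runtime is available and that 'subject.png' is a hypothetical
# local RGB image; the numeric arguments mirror the hidden UI defaults.
#
#   import numpy as np
#   from PIL import Image
#   img = np.array(Image.open('subject.png').convert('RGB'))
#   out = process_relight(
#       img, 'dritë e ngrohtë dielli', 640, 640, 1, -1, 50,
#       'best quality',
#       'lowres, bad anatomy, bad hands, cropped, worst quality',
#       2.0, 2.0, 0.5, 0.9, BGSource.LEFT.value,
#   )
#   Image.fromarray(out).save('relit.png')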

if __name__ == "__main__":
    print(f"Gradio version: {gr.__version__}")
    app = create_demo()
    app.launch(server_name='0.0.0.0')