File size: 24,823 Bytes
0b216ca
 
efba359
81bf056
 
 
 
 
 
0b216ca
81bf056
 
 
 
 
 
 
 
 
 
f9553e7
 
 
551e4bd
 
 
81bf056
 
 
 
551e4bd
 
 
81bf056
 
bacd970
81bf056
 
 
 
 
 
 
 
 
 
 
 
efba359
81bf056
 
 
 
 
551e4bd
 
 
828d71c
 
f9553e7
 
 
94e705f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81bf056
94e705f
 
 
81bf056
 
828d71c
81bf056
f9553e7
81bf056
 
551e4bd
828d71c
551e4bd
81bf056
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9b3cd57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81bf056
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
551e4bd
9b3cd57
 
81bf056
551e4bd
81bf056
 
 
9b3cd57
 
 
81bf056
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0b216ca
efba359
 
 
0b216ca
 
efba359
0b216ca
 
 
 
 
 
 
 
 
efba359
0b216ca
 
 
 
 
 
 
 
 
 
efba359
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0b216ca
 
efba359
0b216ca
efba359
 
 
 
 
 
 
 
 
 
 
0b216ca
 
 
 
 
efba359
0b216ca
 
 
efba359
0b216ca
 
2c1acb2
0b216ca
 
 
efba359
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0b216ca
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
efba359
 
 
0b216ca
 
81bf056
 
 
 
 
 
 
1bef290
81bf056
 
 
1bef290
81bf056
 
 
1bef290
81bf056
 
 
1bef290
81bf056
 
 
1bef290
81bf056
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0b216ca
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
efba359
 
 
 
0b216ca
81bf056
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0b216ca
 
 
 
 
 
 
 
 
81bf056
 
0b216ca
81bf056
 
 
 
 
 
9b3cd57
 
 
 
 
 
81bf056
0b216ca
 
 
 
 
 
 
 
 
 
 
 
81bf056
 
 
 
0b216ca
81bf056
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
828d71c
 
 
 
 
 
 
 
 
81bf056
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
import base64
import io
import json
import os
import sys
from typing import Union, Any, Optional

import gradio as gr
import numpy as np
import requests
import torch
from PIL import Image
import spaces

# Add the project root to the Python path so `cascade.*` imports resolve.
project_root = os.path.dirname(os.path.abspath(__file__))
sys.path.append(project_root)
# Credentials / locations for the private model, injected via Space secrets.
hf_token = os.environ.get("CASCADE_PRIVATE_MODEL_HF_TOKEN")
secret_model = os.environ.get("MODEL_PATH")

# Base pipeline loader path, read from the environment (validated later in
# init_pipeline_if_needed; may be None here).
BASE_MODEL = os.environ.get("BASE_MODEL_ID")

from cascade.condition import Condition
from cascade.generate import generate
from cascade.lora_controller import set_lora_scale

from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# Global pipeline singleton, populated lazily by init_pipeline_if_needed().
_global_pipe = None

# Download the private LoRA weights file using the auth token.
# NOTE(review): this runs at import time and will raise if the secrets are
# missing — confirm that is the intended fail-fast behavior.
model_path = hf_hub_download(
    repo_id="Cascade-Inc/private_model",
    filename=secret_model,
    token=hf_token,
    repo_type="space"
)

# Point Gradio's temp directory at a writable location under $HOME.
temp_dir = os.path.join(os.path.expanduser("~"), "gradio_temp")
os.makedirs(temp_dir, exist_ok=True)
os.environ["GRADIO_TEMP_DIR"] = temp_dir

ADAPTER_NAME = "subject"   # LoRA adapter name used throughout generation
MODEL_PATH = model_path    # local path of the downloaded LoRA weights
ZEN_BG_ENDPOINT = "https://zen-inpaint-1066271267292.europe-west1.run.app/"

def get_gpu_memory_gb() -> float:
    """Return the total memory of CUDA device 0, in gibibytes."""
    device_props = torch.cuda.get_device_properties(0)
    return device_props.total_memory / (1024 ** 3)

def init_pipeline_if_needed():
    """Lazily build and cache the global diffusion pipeline.

    On first call, downloads the pipeline-loader script named by the
    BASE_MODEL_ID env var from the private Space, imports it dynamically,
    obtains the pipeline, moves it to CUDA, and attaches the LoRA adapter.
    Subsequent calls return the cached pipeline.

    Returns:
        The initialized pipeline (also stored in the module-global
        ``_global_pipe``).

    Raises:
        ValueError: If BASE_MODEL_ID is unset or the loader cannot be fetched.
    """
    global _global_pipe
    if _global_pipe is not None:
        return _global_pipe
    
    print("🚀 Initializing pipeline...")
    
    # If BASE_MODEL_ID is set, load the pre-configured pipeline via the
    # loader script stored in the private repo.
    if BASE_MODEL:
        print(f"Loading pipeline from: {BASE_MODEL}")
        try:
            # Download the pipeline-loader script from the private Space.
            pipeline_loader_path = hf_hub_download(
                repo_id="Cascade-Inc/private_model",
                filename=BASE_MODEL,  # expected to be a .py file, e.g. "pipeline_loader.py"
                token=hf_token,
                repo_type="space"
            )
            
            # Import the downloaded script as a module at runtime.
            import importlib.util
            spec = importlib.util.spec_from_file_location("pipeline_loader", pipeline_loader_path)
            pipeline_module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(pipeline_module)
            
            # The private module is expected to expose get_pipeline(token).
            _pipe = pipeline_module.get_pipeline(hf_token)
        except Exception as e:
            print(f"❌ Error loading pipeline from {BASE_MODEL}: {e}")
            raise ValueError(
                f"Failed to load pipeline loader from BASE_MODEL_ID='{BASE_MODEL}'. "
                f"Make sure:\n"
                f"1. The file exists in Cascade-Inc/private_model space\n"
                f"2. BASE_MODEL_ID should be a .py file name (e.g., 'pipeline_loader.py')\n"
                f"3. Not the LoRA path (that's MODEL_PATH)"
            )
    else:
        raise ValueError(
            "BASE_MODEL_ID environment variable is not set.\n"
            "Please set it to the pipeline loader filename (e.g., 'pipeline_loader.py')"
        )

    print("📦 Loading model to CUDA...")
    _pipe = _pipe.to("cuda")
    print("🎨 Loading Cascade weights...")
    # Attach the downloaded LoRA weights and make the adapter active.
    _pipe.load_lora_weights(MODEL_PATH, adapter_name=ADAPTER_NAME)
    _pipe.set_adapters([ADAPTER_NAME])
    _global_pipe = _pipe
    print("✅ Pipeline initialized successfully!")
    return _global_pipe

def _to_pil_rgba(img: Any) -> Image.Image:
    """Convert a PIL image, numpy array, or image file path to PIL RGBA.

    Args:
        img: A ``PIL.Image.Image``, a ``numpy.ndarray``, or a string path
            to an image file on disk.

    Returns:
        The image converted to RGBA mode.

    Raises:
        ValueError: If ``img`` is an unsupported type, or a string path
            that does not exist.
    """
    if isinstance(img, Image.Image):
        pil = img
    elif isinstance(img, np.ndarray):
        pil = Image.fromarray(img)
    elif isinstance(img, str):
        # Report a missing file distinctly instead of falling through to
        # the generic "unsupported type" error, which hid the real cause.
        if not os.path.exists(img):
            raise ValueError(f"Image path does not exist: {img}")
        pil = Image.open(img)
    else:
        raise ValueError(f"Unsupported image type: {type(img).__name__}")

    if pil.mode != "RGBA":
        pil = pil.convert("RGBA")
    return pil

def _center_subject_on_canvas(subject_rgba: Image.Image, canvas_width: int, canvas_height: int) -> Image.Image:
    """Paste *subject_rgba* centered on a transparent canvas of the given size.

    A subject larger than the canvas in either dimension is center-cropped
    to fit; the result always measures exactly (canvas_width, canvas_height).
    Returns an empty transparent canvas if *subject_rgba* is None.
    """
    blank = Image.new("RGBA", (canvas_width, canvas_height), (0, 0, 0, 0))
    if subject_rgba is None:
        return blank

    offset_x = (canvas_width - subject_rgba.width) // 2
    offset_y = (canvas_height - subject_rgba.height) // 2

    # Oversized subject: crop the centered region that fits, then clamp
    # the paste offsets to the canvas.
    if subject_rgba.width > canvas_width or subject_rgba.height > canvas_height:
        left = max(0, -offset_x)
        top = max(0, -offset_y)
        right = left + min(canvas_width, subject_rgba.width)
        bottom = top + min(canvas_height, subject_rgba.height)
        subject_rgba = subject_rgba.crop((left, top, right, bottom))
        offset_x = max(0, offset_x)
        offset_y = max(0, offset_y)

    blank.alpha_composite(subject_rgba, dest=(offset_x, offset_y))
    return blank

def _place_subject_on_canvas(
    subject_rgba: Image.Image,
    canvas_size: int,
    style: str,
    base_coverage: float = 0.7,
) -> Image.Image:
    """Place the subject on a square transparent canvas with style-based
    scale/rotation/offset adjustments.

    Args:
        subject_rgba: Subject image in RGBA mode.
        canvas_size: Side length of the square output canvas, in pixels.
        style: One of "center", "tilt_left", "right"; anything else falls
            back to "center".
        base_coverage: Fraction of the canvas the subject's longer side
            should cover before the style's own scale factor.

    Returns:
        A new (canvas_size x canvas_size) RGBA image.
    """
    # The rectangular implementation is exactly equivalent for a square
    # canvas (min(width, height) == canvas_size, and the positional offsets
    # scale by the same dimension), so delegate to it instead of keeping a
    # duplicated copy of the scaling/rotation/placement logic.
    return _place_subject_on_canvas_rect(
        subject_rgba, canvas_size, canvas_size, style, base_coverage
    )

def _place_subject_on_canvas_rect(
    subject_rgba: Image.Image,
    canvas_width: int,
    canvas_height: int,
    style: str,
    base_coverage: float = 0.7,
) -> Image.Image:
    """Place the subject on a rectangular transparent canvas, applying the
    named style's scale, rotation, and positional offset.

    Unknown styles fall back to "center". The subject's longer side is
    scaled to cover *base_coverage* of the canvas's shorter side (further
    adjusted by the style's own scale factor).
    """
    # Supported placement styles: uniform scale factor, rotation in
    # degrees, and offset from center as a fraction of the canvas size.
    style_table = {
        "center": {"scale": 1.0, "rotation": 0, "pos": (0.0, 0.0)},
        "tilt_left": {"scale": 0.95, "rotation": -15, "pos": (-0.1, 0.0)},
        "right": {"scale": 0.95, "rotation": 0, "pos": (0.25, 0.0)},
    }
    cfg = style_table.get(style, style_table["center"])

    out = Image.new("RGBA", (canvas_width, canvas_height), (0, 0, 0, 0))

    # Resize so the subject's longest side hits the coverage target,
    # which is relative to the canvas's smaller dimension.
    src_w, src_h = subject_rgba.size
    longest = max(src_w, src_h)
    shorter_canvas_side = min(canvas_width, canvas_height)
    target = max(1, int(shorter_canvas_side * base_coverage * cfg["scale"]))
    ratio = target / max(1, longest)
    scaled = subject_rgba.resize(
        (max(1, int(src_w * ratio)), max(1, int(src_h * ratio))),
        Image.LANCZOS,
    )

    # Rotate, letting the bounding box expand to avoid clipping corners.
    turned = scaled.rotate(cfg["rotation"], expand=True, resample=Image.BICUBIC)

    # Convert the fractional style offset to pixels and center the rotated
    # subject around that point.
    shift_x = int(cfg["pos"][0] * canvas_width)
    shift_y = int(cfg["pos"][1] * canvas_height)
    left = int(canvas_width // 2 + shift_x - turned.width // 2)
    top = int(canvas_height // 2 + shift_y - turned.height // 2)

    out.alpha_composite(turned, dest=(left, top))
    return out

def apply_style(image: Image.Image, style: str, width: int = 1024, height: int = 1024) -> Image.Image:
    """Apply the named placement style to *image* on a width x height canvas.

    A None input is replaced by a blank transparent 512x512 subject; any
    non-RGBA input is converted to RGBA before placement.
    """
    if image is None:
        # Default: fully transparent white subject.
        image = Image.new("RGBA", (512, 512), (255, 255, 255, 0))
    elif image.mode != "RGBA":
        image = image.convert("RGBA")

    return _place_subject_on_canvas_rect(image, width, height, style)

def generate_background_local(styled_image: Image.Image, prompt: str, steps: int = 10, width: int = 1024, height: int = 1024) -> Image.Image:
    """Generate a background behind *styled_image* with the local pipeline.

    The subject is centered on a canvas of exactly (width, height), used as
    a spatial condition for the LoRA-adapted pipeline, and the generated
    RGB image is returned. A None subject yields a plain white canvas.
    """
    width, height = int(width), int(height)

    pipe = init_pipeline_if_needed()

    if styled_image is None:
        return Image.new("RGB", (width, height), (255, 255, 255))

    # Fit the subject to the requested canvas, then drop alpha for the
    # conditioning image.
    centered = _center_subject_on_canvas(styled_image, width, height)
    condition = Condition(ADAPTER_NAME, centered.convert("RGB"), position_delta=(0, 0))

    # Enable padding-token orthogonalization for better text-image alignment.
    model_config = {
        'padding_orthogonalization_enabled': True,
        'preserve_norm': True,
        'orthogonalize_all_tokens': False,
    }

    cleaned_prompt = prompt.strip() if prompt else ""
    with set_lora_scale([ADAPTER_NAME], scale=3.0):
        output = generate(
            pipe,
            model_config=model_config,
            prompt=cleaned_prompt,
            conditions=[condition],
            num_inference_steps=steps,
            height=height,
            width=width,
            default_lora=True,
        ).images[0]

    return output

def image_to_base64(image: Image.Image) -> str:
    """Encode *image* as a base64 PNG string (RGBA so transparency survives)."""
    rgba = image if image.mode == "RGBA" else image.convert("RGBA")

    with io.BytesIO() as buf:
        rgba.save(buf, format="PNG")
        payload = buf.getvalue()

    return base64.b64encode(payload).decode("utf-8")

def generate_background_api(
    styled_image: Image.Image,
    prompt: str,
    steps: int = 4,
    api_key: str = "",
    email: str = "",
    zen_mode: str = "bg_generation",
) -> Image.Image:
    """Generate a background via the remote Zen inpaint API.

    Args:
        styled_image: Subject image (its size drives the output size).
        prompt: Background description; a default product-photo prompt is
            used when empty.
        steps: Inference steps forwarded to the API.
        api_key: API key sent in the ``x-api-key`` header.
        email: Registered email sent in the ``x-email`` header.
        zen_mode: Legacy UI mode name, mapped to the API's ``gen_mode``.

    Returns:
        The generated image, or a red-tinted placeholder of the same size
        on any error (missing credentials, HTTP failure, bad payload).
    """
    if styled_image is None:
        return Image.new("RGB", (1024, 1024), (255, 255, 255))
    
    if not api_key or not email:
        return Image.new("RGB", styled_image.size, (255, 200, 200))  # Red tint to indicate error
    
    try:
        width, height = styled_image.size
        base64_image = image_to_base64(styled_image)
        # Ensure padding so the API always receives valid Base64 chunks.
        # (b64encode already pads to a multiple of 4, so this is normally a
        # no-op kept as a safety net.)
        subject_base64 = base64_image + "=" * (-len(base64_image) % 4)

        # Map legacy UI modes to documented gen_mode values
        gen_mode = {
            "subject": "bg_generation",
            "canny": "bg_generation",
            "bg_generation": "bg_generation",
        }.get(zen_mode, "bg_generation")

        # Choose the upscale tier from the larger output dimension.
        max_dim = max(width, height)
        if max_dim <= 1024:
            upscale = "1k"
        elif max_dim <= 1536:
            upscale = "1.5k"
        else:
            upscale = "2k"

        payload = {
            "gen_mode": gen_mode,
            "prompt": prompt.strip() if prompt else "professional product photography background",
            "subject": subject_base64,
            "subject_format": "base64",
            "background": "",
            "negative_prompt": "",
            "steps": int(steps),
            "seed": 42,
            "randomize_seed": True,
            "bg_upscale_choice": upscale,
            "max_bg_side_px": int(max_dim),
            "output_image_format": "base64",
            "use_bg_size_for_output": True,
        }

        headers = {
            "x-api-key": api_key,
            "x-email": email,
            "Content-Type": "application/json",
        }

        response = requests.post(
            ZEN_BG_ENDPOINT,
            headers=headers,
            json=payload,
            timeout=60,
        )

        if response.status_code == 200:
            try:
                result_data = response.json()
            except Exception:
                print(f"[API] Unable to parse response JSON: {response.text[:200]}")
                result_data = {}
            # The "image" field may be either a URL or inline base64 data.
            image_field = result_data.get("image")
            if image_field:
                if image_field.startswith("http"):
                    try:
                        img_resp = requests.get(image_field, timeout=60)
                        img_resp.raise_for_status()
                        return Image.open(io.BytesIO(img_resp.content))
                    except Exception as download_err:
                        print(f"[API] Failed to download image URL: {download_err}")
                else:
                    try:
                        img_data = base64.b64decode(image_field)
                        return Image.open(io.BytesIO(img_data))
                    except Exception as decode_err:
                        print(f"[API] Failed to decode base64 response: {decode_err}")
            print(f"[API] 200 response without image: {result_data}")
        else:
            print(f"[API] Non-200 response ({response.status_code}): {response.text[:500]}")
        # Any unhandled API outcome falls through to the error placeholder.
        return Image.new("RGB", styled_image.size, (255, 200, 200))
    except Exception as e:
        print(f"API Error: {e}")
        return Image.new("RGB", styled_image.size, (255, 200, 200))

def generate_background(
    styled_image: Image.Image,
    prompt: str,
    steps: int = 10,
    use_api: bool = False,
    api_key: str = "",
    email: str = "",
    width: int = 1024,
    height: int = 1024,
    mode: str = "subject",
) -> Image.Image:
    """Dispatch background generation to the remote API or the local model.

    The API path ignores width/height (output follows the subject's size);
    the local path ignores the credentials and mode.
    """
    if not use_api:
        return generate_background_local(styled_image, prompt, steps, width, height)

    return generate_background_api(
        styled_image, prompt, steps, api_key, email, zen_mode=mode
    )

# Gradio Interface
def create_simple_app():
    """Build the Gradio Blocks app for ads-background generation.

    Layout: an example-prompt accordion, a left column with image upload,
    output size, prompt, API settings and steps, and a right column showing
    the generated result.

    Returns:
        The (unlaunched) ``gr.Blocks`` instance.
    """
    # Example prompts for reference
    example_prompts = [
        {
            "title": "Handcrafted Leather Wallet",
            "prompt": "A premium lifestyle advertisement for a hand-stitched dark brown leather wallet. The wallet is half-open on a timeworn walnut desk, revealing the suede interior and a few vintage travel tickets. Surround it with a rolled map, brass fountain pen, and antique compass to emphasize heritage craftsmanship. Soft amber light from a desk lamp on the right grazes the grainy leather and creates gentle shadow falloff, while a blurred wall of old books fills the background. Overall tone is classic, rustic, and aspirational."
        },
        {
            "title": "Sparkling Water with Fresh Lemons", 
            "prompt": "A product hero shot for a premium sparkling water infused with fresh lemons. Place a dewy glass bottle at the center of a white marble countertop, with a tall tumbler filled with effervescent water, thin lemon wheels, and crystal-clear ice cubes beside it. Scatter a few lemon zest curls and condensation droplets for sensory detail. Use a soft-focus pale blue and white gradient background to communicate freshness, and bathe the scene in bright, cool, top-down lighting that creates sharp reflections. Keep the styling ultra-clean, crisp, and minimalist."
        },
        {
            "title": "High-tech Smartwatch",
            "prompt": "A cinematic tech advertisement for a titanium smartwatch with an always-on illuminated screen displaying futuristic UI graphics. Position the watch on a jagged slab of matte black slate to contrast its polished chamfered edges. Behind it, place a blurred nighttime cityscape with teal and magenta neon bokeh to suggest urban energy. Hit the product with a sharp, directional spotlight from the top left to carve out highlights along the bezel and bracelet, while subtle rim lighting separates it from the background. Mood is sleek, futuristic, and performance-driven."
        },
        {
            "title": "Japanese Ramen Bowl",
            "prompt": "A mouthwatering food advertisement for a ceramic bowl of tonkotsu ramen. Present silky broth with two slices of torched chashu pork, a jammy soft-boiled egg, nori sheets, scallions, and sesame seeds arranged artfully. Place the bowl on a rustic wooden table with lacquered chopsticks resting on a ceramic holder, plus a tiny dish of pickled ginger for color. Capture wisps of steam drifting upward in soft overhead light, while the background falls into a blurred, amber-toned izakaya interior with paper lanterns. Atmosphere is warm, authentic, and comforting."
        },
        {
            "title": "Japanese Peach Iced Tea",
            "prompt": "A commercial advertisement for a Japanese peach-flavored iced tea. The composition features the product bottle placed next to a tall, elegant glass filled with the tea and sparkling ice cubes. The background is a soft, warm gradient of peach and beige, creating a gentle and sophisticated atmosphere. The overall style is clean, minimalist, and refined, with bright, soft lighting that highlights the crisp, refreshing quality of the beverage."
        }
    ]

    with gr.Blocks(title="Ads Background Generation") as app:
        gr.Markdown("# Ads Background Generation App")
        gr.Markdown("Upload an image with transparent background → Enter prompt → Generate")
        
        # Example Prompts Section
        with gr.Accordion("📝 Example Prompts (Click to expand)", open=False):
            gr.Markdown("### Background Prompt Examples")
            gr.Markdown("Click any example below to copy it to the background description field:")
            
            # Create example buttons, three in the first row, the rest below.
            example_buttons = []
            with gr.Row():
                for i, example in enumerate(example_prompts):
                    if i < 3:  # First row
                        example_btn = gr.Button(
                            f"📋 {example['title']}", 
                            variant="secondary",
                            size="sm"
                        )
                        example_buttons.append(example_btn)
            
            with gr.Row():
                for i, example in enumerate(example_prompts):
                    if i >= 3:  # Second row
                        example_btn = gr.Button(
                            f"📋 {example['title']}", 
                            variant="secondary",
                            size="sm"
                        )
                        example_buttons.append(example_btn)
            
            # Display area for selected prompt preview
            selected_prompt_display = gr.Textbox(
                label="Selected Prompt Preview",
                lines=4,
                max_lines=8,
                interactive=False,
                visible=False
            )
        
        with gr.Row():
            # Left column
            with gr.Column(scale=1):
                # Image upload (top left)
                input_image = gr.Image(
                    label="Upload Image (Transparent Background)",
                    type="pil",
                    format="png",
                    image_mode="RGBA",
                    height=350
                )
                
                # Image dimensions
                with gr.Row():
                    img_width = gr.Number(
                        value=1024,
                        label="Width",
                        precision=0,
                        minimum=256,
                        maximum=2048
                    )
                    img_height = gr.Number(
                        value=1024,
                        label="Height", 
                        precision=0,
                        minimum=256,
                        maximum=2048
                    )
                
                # Background prompt (bottom left)
                bg_prompt = gr.Textbox(
                    label="Background Description",
                    placeholder="e.g.: Forest scene, soft lighting",
                    lines=3
                )

                use_api = gr.Checkbox(
                    label="Use API",
                    value=False
                )

                with gr.Group(visible=False) as api_group:
                    api_key = gr.Textbox(
                        label="API Key",
                        type="password",
                        placeholder="Enter your API key"
                    )
                    email = gr.Textbox(
                        label="Email",
                        placeholder="Enter your registered email"
                    )
                    mode = gr.Radio(
                        choices=["bg_generation"],
                        value="bg_generation",
                        label="API gen_mode",
                        interactive=False
                    )
                
                # Generation steps
                steps_slider = gr.Slider(
                    minimum=5,
                    maximum=20,
                    value=10,
                    step=1,
                    label="Generation Steps"
                )
                
                # Generate background button
                generate_bg_btn = gr.Button("Generate Background", variant="primary", size="lg")
                
            # Right column - Result display
            with gr.Column(scale=1):
                final_result = gr.Image(
                    label="Generated Result",
                    type="pil",
                    format="png",
                    height=700
                )

        def toggle_api_group(use_api_flag):
            # Show the API credential fields only when "Use API" is checked.
            return gr.update(visible=use_api_flag)
        
        use_api.change(
            fn=toggle_api_group,
            inputs=[use_api],
            outputs=[api_group]
        )
        
        # Generate background directly from input image.
        # FIX: @spaces.GPU belongs on the inference handler, not on
        # create_simple_app — the original decorated the UI builder, which
        # requests the GPU during app construction and leaves the actual
        # generation call unwrapped (so it would run without a GPU on
        # ZeroGPU Spaces).
        @spaces.GPU
        def generate_from_input(image, prompt, steps, width, height, use_api_flag, api_key_value, email_value, mode_value):
            if image is None:
                return None
            
            # Ensure image is RGBA
            if image.mode != "RGBA":
                image = image.convert("RGBA")

            width = int(width)
            height = int(height)

            # Center original uploaded image on a transparent canvas without resizing
            image = _center_subject_on_canvas(image, width, height)
            
            # Generate background using selected method
            return generate_background(
                image,
                prompt,
                steps,
                use_api_flag,
                api_key_value,
                email_value,
                width,
                height,
                mode_value,
            )
        
        # Event binding
        generate_bg_btn.click(
            fn=generate_from_input,
            inputs=[input_image, bg_prompt, steps_slider, img_width, img_height, use_api, api_key, email, mode],
            outputs=[final_result]
        )
        
        # Example prompt button handlers. The closure binds prompt_text per
        # button (avoids the late-binding loop-variable pitfall).
        def create_example_handler(prompt_text):
            def handler():
                return prompt_text, gr.update(value=prompt_text, visible=True)
            return handler
        
        # Connect example buttons to background prompt field and preview
        for i, example_btn in enumerate(example_buttons):
            if i < len(example_prompts):
                example_btn.click(
                    fn=create_example_handler(example_prompts[i]['prompt']),
                    outputs=[bg_prompt, selected_prompt_display]
                )
    
    return app

# Pre-load the model at import time so the first user request is not slowed
# by pipeline initialization.
# NOTE(review): init_pipeline_if_needed() calls .to("cuda") here, outside
# any request handler — on ZeroGPU Spaces the GPU may not be attached at
# startup; confirm this works in the target deployment.
print("=" * 60)
print("🔧 Pre-loading models on startup...")
print("=" * 60)
init_pipeline_if_needed()
print("=" * 60)
print("✨ All models loaded and ready!")
print("=" * 60)

if __name__ == "__main__":
    # Launch the Gradio app on all interfaces at the Spaces default port.
    app = create_simple_app()
    app.launch(
        debug=True,
        share=False,
        server_name="0.0.0.0",
        server_port=7860
    )