Gerchegg committed on
Commit
4812504
·
verified ·
1 Parent(s): 1eb2940

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +561 -0
app.py ADDED
@@ -0,0 +1,561 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import random
4
+ import json
5
+ import torch
6
+ from PIL import Image
7
+ import os
8
+ import time
9
+ import logging
10
+
11
# Optional import of `spaces` so the app also runs outside HF Spaces (e.g. Runpod).
try:
    import spaces
except ImportError:
    SPACES_AVAILABLE = False
    logger = logging.getLogger(__name__)
    logger.warning("⚠️ spaces module not available - running without ZeroGPU support")
else:
    SPACES_AVAILABLE = True
19
+
20
+ from diffusers import (
21
+ DiffusionPipeline,
22
+ QwenImageImg2ImgPipeline
23
+ )
24
+ from huggingface_hub import hf_hub_download
25
+
26
# Configure logging once for the whole app.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s | %(levelname)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)

_banner = "=" * 60
logger.info(_banner)
logger.info("LOADING QWEN-SOLOBAND ADVANCED")
logger.info(_banner)
37
+
38
hf_token = os.environ.get("HF_TOKEN")
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16

# Log GPU information.
logger.info(f"CUDA available: {torch.cuda.is_available()}")
# BUG FIX: gpu_count used to be assigned only inside the CUDA branch, which
# raised NameError at `if gpu_count > 1:` below on CPU-only hosts.
# Define it unconditionally.
gpu_count = torch.cuda.device_count() if torch.cuda.is_available() else 0
if torch.cuda.is_available():
    logger.info(f"Number of GPUs: {gpu_count}")
    for i in range(gpu_count):
        logger.info(f" GPU {i}: {torch.cuda.get_device_name(i)}")
        logger.info(f" Memory: {torch.cuda.get_device_properties(i).total_memory / 1024**3:.1f} GB")

# =================================================================
# MODEL LOADING
# =================================================================

# 1. Base Text-to-Image model
logger.info("\n[1/3] Loading base Text2Image model...")
model_id = "Gerchegg/Qwen-Soloband-Diffusers"

try:
    start_time = time.time()

    # Multi-GPU hosts: let accelerate balance the weights across devices.
    if gpu_count > 1:
        device_map = "balanced"
        logger.info(f" Device map: balanced ({gpu_count} GPUs)")
    else:
        device_map = None
        logger.info(" Device map: single GPU")

    pipe_txt2img = DiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=dtype,
        device_map=device_map,
        token=hf_token
    )

    # With no device_map the pipeline stays on CPU until moved explicitly.
    if device_map is None:
        pipe_txt2img.to(device)

    load_time = time.time() - start_time
    logger.info(f" ✓ Text2Image loaded in {load_time:.1f}s")

except Exception as e:
    logger.error(f" ❌ Error loading Text2Image: {e}")
    raise

# 2. Image-to-Image pipeline: built from the Text2Image components so the
#    model weights are shared (no extra VRAM) - the correct approach for the
#    Qwen-Image architecture.
logger.info("\n[2/3] Creating Image2Image pipeline...")
try:
    pipe_img2img = QwenImageImg2ImgPipeline(
        vae=pipe_txt2img.vae,
        text_encoder=pipe_txt2img.text_encoder,
        tokenizer=pipe_txt2img.tokenizer,
        transformer=pipe_txt2img.transformer,
        scheduler=pipe_txt2img.scheduler
    )
    logger.info(" ✓ Image2Image pipeline created (reusing components)")
except Exception as e:
    # img2img is optional; the txt2img tab still works without it.
    logger.error(f" ❌ Error creating Image2Image: {e}")
    pipe_img2img = None

# ControlNet is intentionally not used - removed for simplicity.

# Memory optimizations: VAE tiling/slicing reduces peak VRAM on large images.
logger.info("\nApplying memory optimizations...")
for pipe in [pipe_txt2img, pipe_img2img]:
    if pipe and hasattr(pipe, 'vae'):
        if hasattr(pipe.vae, 'enable_tiling'):
            pipe.vae.enable_tiling()
        if hasattr(pipe.vae, 'enable_slicing'):
            pipe.vae.enable_slicing()

logger.info(" ✓ VAE tiling and slicing enabled")

logger.info("\n" + "=" * 60)
logger.info("✓ ALL MODELS LOADED")
logger.info("=" * 60)
121
+
122
+ # =================================================================
123
+ # HELPER FUNCTIONS
124
+ # =================================================================
125
+
126
def resize_image(input_image, max_size=1024):
    """Resize an image so its longest side equals max_size, keeping aspect ratio.

    Both output dimensions are floored to a multiple of 8 and clamped to a
    minimum of 8 px (diffusion pipelines expect latent-friendly sizes).
    """
    width, height = input_image.size
    aspect = width / height

    # Scale the longest side to max_size, derive the other from the ratio.
    if width > height:
        target_w = max_size
        target_h = int(target_w / aspect)
    else:
        target_h = max_size
        target_w = int(target_h * aspect)

    # Floor to a multiple of 8, never collapsing to zero.
    target_w = max(8, target_w - target_w % 8)
    target_h = max(8, target_h - target_h % 8)

    return input_image.resize((target_w, target_h), Image.Resampling.LANCZOS)
146
+
147
+ # =================================================================
148
+ # LORA FUNCTIONS
149
+ # =================================================================
150
+
151
# Directory scanned for user-provided LoRA files.
LOCAL_LORA_DIR = "/workspace/loras"

# Built-in LoRAs hosted on the HuggingFace Hub (downloaded lazily on first use).
# Each entry mirrors the layout produced by scan_local_loras().
HUB_LORAS = {
    "Realism": {
        "repo": "flymy-ai/qwen-image-realism-lora",
        "trigger": "Super Realism portrait of",
        "weights": "pytorch_lora_weights.safetensors",
        "source": "hub"
    },
    "Anime": {
        "repo": "alfredplpl/qwen-image-modern-anime-lora",
        "trigger": "Japanese modern anime style, ",
        "weights": "pytorch_lora_weights.safetensors",
        "source": "hub"
    }
    # Drop additional LoRAs into /workspace/loras/ as .safetensors files.
}
170
+
171
def scan_local_loras():
    """Scan LOCAL_LORA_DIR for ``*.safetensors`` files.

    Returns:
        dict: LoRA name (file stem) -> metadata dict with keys ``path``,
        ``trigger`` (always empty for local files), ``weights`` and
        ``source`` ("local"), matching the HUB_LORAS entry layout.
    """
    local_loras = {}

    # isdir (not exists) so a stray regular file at this path is also skipped.
    if not os.path.isdir(LOCAL_LORA_DIR):
        logger.info(f" Local LoRA directory not found: {LOCAL_LORA_DIR}")
        return local_loras

    logger.info(f" Scanning local LoRA directory: {LOCAL_LORA_DIR}")

    try:
        # BUG FIX: sorted() makes the resulting LoRA menu order deterministic;
        # os.listdir order is filesystem-dependent.
        for file in sorted(os.listdir(LOCAL_LORA_DIR)):
            if not file.endswith('.safetensors'):
                continue

            lora_name = os.path.splitext(file)[0]  # name without extension
            local_path = os.path.join(LOCAL_LORA_DIR, file)

            local_loras[lora_name] = {
                "path": local_path,
                "trigger": "",  # no trigger word for local files
                "weights": file,
                "source": "local"
            }

            logger.info(f" ✓ Found local LoRA: {lora_name} ({file})")

    except Exception as e:
        # Best-effort: a scan failure must not prevent app startup.
        logger.warning(f" Error scanning local LoRA directory: {e}")

    return local_loras
204
+
205
# Discover any LoRAs dropped into the local directory.
logger.info("\nScanning for LoRA models...")
LOCAL_LORAS = scan_local_loras()

# Hub entries first, then local ones; a local LoRA with the same name
# overrides the Hub entry.
AVAILABLE_LORAS = {**HUB_LORAS, **LOCAL_LORAS}

if LOCAL_LORAS:
    logger.info(f" ✓ Found {len(LOCAL_LORAS)} local LoRA(s)")
    logger.info(f" Total available LoRAs: {len(AVAILABLE_LORAS)}")
215
+
216
def load_lora_weights(pipeline, lora_name, lora_scale, hf_token):
    """Load LoRA weights into ``pipeline`` (lazy loading).

    Hub LoRAs are downloaded (and cached) on first use; local LoRAs are read
    from /workspace/loras/.

    Args:
        pipeline: a diffusers pipeline with LoRA support.
        lora_name: key into AVAILABLE_LORAS, or "None" to skip.
        lora_scale: adapter weight passed to set_adapters().
        hf_token: HF token for gated Hub repos (may be None).

    Returns:
        str | None: the LoRA's trigger word ('' when it has none) on success;
        None when lora_name is "None"/unknown or loading failed.
    """
    if lora_name == "None" or lora_name not in AVAILABLE_LORAS:
        return None

    lora_info = AVAILABLE_LORAS[lora_name]

    try:
        if lora_info['source'] == 'hub':
            # Lazy download from the HuggingFace Hub (cached after first use).
            logger.info(f" Loading LoRA from Hub: {lora_info['repo']}")
            logger.info(f" (Downloading if not cached...)")

            # BUG FIX: pass adapter_name here as well. Without it diffusers
            # auto-names the adapter (e.g. "default_0"), and the
            # set_adapters([lora_name]) call below would reference a
            # non-existent adapter and fail.
            pipeline.load_lora_weights(
                lora_info['repo'],
                weight_name=lora_info.get('weights', 'pytorch_lora_weights.safetensors'),
                adapter_name=lora_name,
                token=hf_token
            )

            logger.info(f" ✓ Hub LoRA loaded (cached for future use)")
        else:
            # Local file from /workspace/loras/.
            logger.info(f" Loading local LoRA: {lora_info['path']}")

            pipeline.load_lora_weights(
                lora_info['path'],
                adapter_name=lora_name
            )

            logger.info(f" ✓ Local LoRA loaded")

        # Apply the requested strength.
        if hasattr(pipeline, 'set_adapters'):
            pipeline.set_adapters([lora_name], adapter_weights=[lora_scale])

        return lora_info.get('trigger', '')

    except Exception as e:
        logger.error(f" ❌ Error loading LoRA {lora_name}: {e}")
        return None
260
+
261
+ # =================================================================
262
+ # GENERATION FUNCTIONS
263
+ # =================================================================
264
+
265
# Largest value accepted by the seed sliders (int32 max).
MAX_SEED = np.iinfo(np.int32).max

def gpu_decorator(duration=180):
    """Return a decorator that wraps ``func`` in spaces.GPU on ZeroGPU hosts.

    When the `spaces` module is unavailable the function is returned unchanged,
    so the same code runs on plain GPU boxes (e.g. Runpod).
    """
    def decorator(func):
        if not SPACES_AVAILABLE:
            return func
        return spaces.GPU(duration=duration)(func)
    return decorator
274
+
275
@gpu_decorator(duration=180)
def generate_text2img(
    prompt,
    negative_prompt=" ",
    width=1664,
    height=928,
    seed=42,
    randomize_seed=False,
    guidance_scale=2.5,
    num_inference_steps=40,
    lora_name="None",
    lora_scale=1.0,
    progress=gr.Progress(track_tqdm=True)
):
    """Text-to-Image generation.

    Args:
        prompt: positive text prompt (a LoRA trigger word may be prepended).
        negative_prompt: negative text prompt.
        width, height: output resolution in pixels.
        seed: RNG seed; replaced by a random one when randomize_seed is True.
        randomize_seed: pick a fresh random seed per call.
        guidance_scale: passed to the pipeline as true_cfg_scale.
        num_inference_steps: number of denoising steps.
        lora_name: key into AVAILABLE_LORAS or "None".
        lora_scale: LoRA adapter weight.
        progress: Gradio progress tracker (tqdm-driven).

    Returns:
        tuple: (generated PIL image, seed actually used).
    """
    logger.info("\n" + "=" * 60)
    logger.info("TEXT-TO-IMAGE GENERATION")
    logger.info("=" * 60)

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    logger.info(f" Prompt: {prompt[:100]}...")
    logger.info(f" Size: {width}x{height}")
    logger.info(f" Steps: {num_inference_steps}, CFG: {guidance_scale}")
    logger.info(f" Seed: {seed}")
    logger.info(f" LoRA: {lora_name} (scale: {lora_scale})")

    try:
        # Optionally attach a LoRA and pick up its trigger word.
        trigger_word = None
        if lora_name != "None":
            trigger_word = load_lora_weights(pipe_txt2img, lora_name, lora_scale, hf_token)

        if trigger_word:
            prompt = trigger_word + prompt
            logger.info(f" Added trigger: {trigger_word}")

        generator = torch.Generator(device=device).manual_seed(seed)

        try:
            image = pipe_txt2img(
                prompt=prompt,
                negative_prompt=negative_prompt,
                width=width,
                height=height,
                num_inference_steps=num_inference_steps,
                true_cfg_scale=guidance_scale,
                generator=generator
            ).images[0]
        finally:
            # BUG FIX: unload in a finally block so a failed generation does
            # not leave the LoRA adapter attached for subsequent requests.
            if lora_name != "None":
                pipe_txt2img.unload_lora_weights()

        logger.info(" ✓ Generation completed")

        return image, seed

    except Exception as e:
        logger.error(f" ❌ Error: {e}")
        raise
338
+
339
@gpu_decorator(duration=180)
def generate_img2img(
    input_image,
    prompt,
    negative_prompt=" ",
    strength=0.75,
    seed=42,
    randomize_seed=False,
    guidance_scale=2.5,
    num_inference_steps=40,
    lora_name="None",
    lora_scale=1.0,
    progress=gr.Progress(track_tqdm=True)
):
    """Image-to-Image generation.

    Args mirror generate_text2img, plus:
        input_image: source PIL image (resized to <=1024 px, multiple of 8).
        strength: denoising strength; 0.0 keeps the input, 1.0 redraws fully.

    Returns:
        tuple: (generated PIL image, seed actually used).

    Raises:
        gr.Error: when no input image is given or the img2img pipeline
            failed to initialize at startup.
    """
    logger.info("\n" + "=" * 60)
    logger.info("IMAGE-TO-IMAGE GENERATION")
    logger.info("=" * 60)

    if input_image is None:
        raise gr.Error("Please upload an input image")

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Downscale to the working resolution (keeps aspect ratio, multiple of 8).
    resized = resize_image(input_image, max_size=1024)

    logger.info(f" Prompt: {prompt[:100]}...")
    logger.info(f" Input size: {input_image.size} → {resized.size}")
    logger.info(f" Strength: {strength}")
    logger.info(f" Steps: {num_inference_steps}, CFG: {guidance_scale}")
    logger.info(f" LoRA: {lora_name}")

    try:
        if pipe_img2img is None:
            raise gr.Error("Image2Image pipeline not available")

        # Optionally attach a LoRA and pick up its trigger word.
        trigger_word = None
        if lora_name != "None":
            trigger_word = load_lora_weights(pipe_img2img, lora_name, lora_scale, hf_token)

        if trigger_word:
            prompt = trigger_word + prompt

        generator = torch.Generator(device=device).manual_seed(seed)

        try:
            image = pipe_img2img(
                prompt=prompt,
                negative_prompt=negative_prompt,
                image=resized,
                strength=strength,
                num_inference_steps=num_inference_steps,
                true_cfg_scale=guidance_scale,
                generator=generator
            ).images[0]
        finally:
            # BUG FIX: unload in a finally block so a failed generation does
            # not leave the LoRA adapter attached for subsequent requests.
            if lora_name != "None":
                pipe_img2img.unload_lora_weights()

        logger.info(" ✓ Generation completed")

        return image, seed

    except Exception as e:
        logger.error(f" ❌ Error: {e}")
        raise
410
+
411
# ControlNet handler intentionally removed - not used.

# =================================================================
# GRADIO INTERFACE
# =================================================================

# NOTE: the redundant second `MAX_SEED = np.iinfo(np.int32).max` that used to
# live here was removed; MAX_SEED is already defined in the generation section.

css = """
#col-container {
    margin: 0 auto;
    max-width: 1400px;
}
"""

with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    # "None" disables LoRA; the rest come from Hub + local scan.
    lora_choices = ["None"] + list(AVAILABLE_LORAS.keys())

    gr.Markdown(f"""
    # 🎨 Qwen Soloband - Image2Image + LoRA

    **Продвинутая модель генерации** с поддержкой Text-to-Image, Image-to-Image и LoRA стилей.

    ### ✨ Возможности:
    - 🖼️ **Text-to-Image** - Генерация из текста, разрешения до 2048×2048
    - 🔄 **Image-to-Image** - Модификация изображений с контролем strength (0.0-1.0)
    - 🎭 **LoRA Support** - {len(AVAILABLE_LORAS)} доступных стилей (Hub + локальные)
    - 🔌 **Full API** - Все функции доступны через API
    - ⚡ **Optimized** - VAE tiling/slicing, правильный QwenImageImg2ImgPipeline

    **Модель**: [Gerchegg/Qwen-Soloband-Diffusers](https://huggingface.co/Gerchegg/Qwen-Soloband-Diffusers)

    💡 **Local LoRAs**: Положите .safetensors файлы в `/workspace/loras/` - они появятся автоматически!
    """)

    with gr.Tabs() as tabs:

        # TAB 1: Text-to-Image
        with gr.Tab("📝 Text-to-Image"):
            with gr.Row():
                with gr.Column(scale=1):
                    t2i_prompt = gr.Text(
                        label="Prompt",
                        placeholder="SB_AI, a beautiful landscape...",
                        lines=3
                    )

                    t2i_run = gr.Button("Generate", variant="primary")

                    with gr.Accordion("Advanced Settings", open=False):
                        t2i_negative = gr.Text(label="Negative Prompt", value="blurry, low quality")

                        with gr.Row():
                            t2i_width = gr.Slider(label="Width", minimum=512, maximum=2048, step=64, value=1664)
                            t2i_height = gr.Slider(label="Height", minimum=512, maximum=2048, step=64, value=928)

                        with gr.Row():
                            t2i_steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=40)
                            t2i_cfg = gr.Slider(label="CFG", minimum=0.0, maximum=7.5, step=0.1, value=2.5)

                        with gr.Row():
                            t2i_seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                            t2i_random_seed = gr.Checkbox(label="Random", value=True)

                        t2i_lora = gr.Radio(
                            label="LoRA Style",
                            choices=lora_choices,
                            value="None",
                            info=f"Hub: {len(HUB_LORAS)}, Local: {len(LOCAL_LORAS)}"
                        )
                        t2i_lora_scale = gr.Slider(label="LoRA Strength", minimum=0.0, maximum=2.0, step=0.1, value=1.0)

                with gr.Column(scale=1):
                    t2i_output = gr.Image(label="Generated Image")
                    t2i_seed_output = gr.Number(label="Used Seed")

        # TAB 2: Image-to-Image
        with gr.Tab("🔄 Image-to-Image"):
            with gr.Row():
                with gr.Column(scale=1):
                    i2i_input = gr.Image(type="pil", label="Input Image")
                    i2i_prompt = gr.Text(
                        label="Prompt",
                        placeholder="Transform this image into...",
                        lines=3
                    )

                    i2i_strength = gr.Slider(
                        label="Denoising Strength",
                        info="0.0 = original image, 1.0 = complete redraw",
                        minimum=0.0,
                        maximum=1.0,
                        step=0.05,
                        value=0.75
                    )

                    i2i_run = gr.Button("Generate", variant="primary")

                    with gr.Accordion("Advanced Settings", open=False):
                        i2i_negative = gr.Text(label="Negative Prompt", value="blurry, low quality")

                        with gr.Row():
                            i2i_steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=40)
                            i2i_cfg = gr.Slider(label="CFG", minimum=0.0, maximum=7.5, step=0.1, value=2.5)

                        with gr.Row():
                            i2i_seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                            i2i_random_seed = gr.Checkbox(label="Random", value=True)

                        i2i_lora = gr.Radio(
                            label="LoRA Style",
                            choices=lora_choices,
                            value="None",
                            info=f"Hub: {len(HUB_LORAS)}, Local: {len(LOCAL_LORAS)}"
                        )
                        i2i_lora_scale = gr.Slider(label="LoRA Strength", minimum=0.0, maximum=2.0, step=0.1, value=1.0)

                with gr.Column(scale=1):
                    i2i_output = gr.Image(label="Generated Image")
                    i2i_seed_output = gr.Number(label="Used Seed")

    # Event handlers (registered inside the Blocks context; api_name exposes
    # each one as an API endpoint).
    t2i_run.click(
        fn=generate_text2img,
        inputs=[
            t2i_prompt, t2i_negative, t2i_width, t2i_height,
            t2i_seed, t2i_random_seed, t2i_cfg, t2i_steps,
            t2i_lora, t2i_lora_scale
        ],
        outputs=[t2i_output, t2i_seed_output],
        api_name="text2img"
    )

    i2i_run.click(
        fn=generate_img2img,
        inputs=[
            i2i_input, i2i_prompt, i2i_negative, i2i_strength,
            i2i_seed, i2i_random_seed, i2i_cfg, i2i_steps,
            i2i_lora, i2i_lora_scale
        ],
        outputs=[i2i_output, i2i_seed_output],
        api_name="img2img"
    )

if __name__ == "__main__":
    demo.launch(
        show_api=True,
        share=False
    )
561
+