Gerchegg committed on
Commit
86d18c7
·
verified ·
1 Parent(s): 1449d26

Add support for local LoRA from /workspace/loras/

Browse files
Files changed (1) hide show
  1. app.py +119 -33
app.py CHANGED
@@ -148,25 +148,113 @@ def resize_image(input_image, max_size=1024):
148
  # LORA FUNCTIONS
149
  # =================================================================
150
 
151
- # Список доступных LoRA
152
- AVAILABLE_LORAS = {
 
 
 
153
  "Realism": {
154
  "repo": "flymy-ai/qwen-image-realism-lora",
155
  "trigger": "Super Realism portrait of",
156
- "weights": "pytorch_lora_weights.safetensors"
 
157
  },
158
  "Anime": {
159
  "repo": "alfredplpl/qwen-image-modern-anime-lora",
160
  "trigger": "Japanese modern anime style, ",
161
- "weights": "pytorch_lora_weights.safetensors"
 
162
  },
163
  "Analog Film": {
164
  "repo": "janekm/analog_film",
165
  "trigger": "fifthel",
166
- "weights": "converted_complete.safetensors"
 
167
  }
168
  }
169
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
170
  # =================================================================
171
  # GENERATION FUNCTIONS
172
  # =================================================================
@@ -212,20 +300,14 @@ def generate_text2img(
212
 
213
  try:
214
  # Загружаем LoRA если выбрана
215
- if lora_name != "None" and lora_name in AVAILABLE_LORAS:
216
- lora_info = AVAILABLE_LORAS[lora_name]
217
- logger.info(f" Loading LoRA: {lora_info['repo']}")
218
-
219
- pipe_txt2img.load_lora_weights(
220
- lora_info['repo'],
221
- weight_name=lora_info.get('weights', 'pytorch_lora_weights.safetensors'),
222
- token=hf_token
223
- )
224
 
225
- # Добавляем trigger word
226
- if lora_info['trigger']:
227
- prompt = lora_info['trigger'] + prompt
228
- logger.info(f" Added trigger: {lora_info['trigger']}")
229
 
230
  generator = torch.Generator(device=device).manual_seed(seed)
231
 
@@ -291,15 +373,13 @@ def generate_img2img(
291
  raise gr.Error("Image2Image pipeline not available")
292
 
293
  # Загружаем LoRA если выбрана
294
- if lora_name != "None" and lora_name in AVAILABLE_LORAS:
295
- lora_info = AVAILABLE_LORAS[lora_name]
296
- pipe_img2img.load_lora_weights(
297
- lora_info['repo'],
298
- weight_name=lora_info.get('weights', 'pytorch_lora_weights.safetensors'),
299
- token=hf_token
300
- )
301
- if lora_info['trigger']:
302
- prompt = lora_info['trigger'] + prompt
303
 
304
  generator = torch.Generator(device=device).manual_seed(seed)
305
 
@@ -341,7 +421,9 @@ css = """
341
  """
342
 
343
  with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
344
- gr.Markdown("""
 
 
345
  # 🎨 Qwen Soloband - Image2Image + LoRA
346
 
347
  **Продвинутая модель генерации** с поддержкой Text-to-Image, Image-to-Image и LoRA стилей.
@@ -349,11 +431,13 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
349
  ### ✨ Возможности:
350
  - 🖼️ **Text-to-Image** - Генерация из текста, разрешения до 2048×2048
351
  - 🔄 **Image-to-Image** - Модификация изображений с контролем strength (0.0-1.0)
352
- - 🎭 **LoRA Support** - Динамическая загрузка стилей (Realism, Anime, Film)
353
  - 🔌 **Full API** - Все функции доступны через API
354
  - ⚡ **Optimized** - VAE tiling/slicing, правильный QwenImageImg2ImgPipeline
355
 
356
  **Модель**: [Gerchegg/Qwen-Soloband-Diffusers](https://huggingface.co/Gerchegg/Qwen-Soloband-Diffusers)
 
 
357
  """)
358
 
359
  with gr.Tabs() as tabs:
@@ -387,8 +471,9 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
387
 
388
  t2i_lora = gr.Radio(
389
  label="LoRA Style",
390
- choices=["None"] + list(AVAILABLE_LORAS.keys()),
391
- value="None"
 
392
  )
393
  t2i_lora_scale = gr.Slider(label="LoRA Strength", minimum=0.0, maximum=2.0, step=0.1, value=1.0)
394
 
@@ -431,8 +516,9 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
431
 
432
  i2i_lora = gr.Radio(
433
  label="LoRA Style",
434
- choices=["None"] + list(AVAILABLE_LORAS.keys()),
435
- value="None"
 
436
  )
437
  i2i_lora_scale = gr.Slider(label="LoRA Strength", minimum=0.0, maximum=2.0, step=0.1, value=1.0)
438
 
 
148
  # LORA FUNCTIONS
149
  # =================================================================
150
 
151
+ # Папка для локальных LoRA
152
+ LOCAL_LORA_DIR = "/workspace/loras"
153
+
154
+ # Базовые LoRA из HuggingFace Hub
155
+ HUB_LORAS = {
156
  "Realism": {
157
  "repo": "flymy-ai/qwen-image-realism-lora",
158
  "trigger": "Super Realism portrait of",
159
+ "weights": "pytorch_lora_weights.safetensors",
160
+ "source": "hub"
161
  },
162
  "Anime": {
163
  "repo": "alfredplpl/qwen-image-modern-anime-lora",
164
  "trigger": "Japanese modern anime style, ",
165
+ "weights": "pytorch_lora_weights.safetensors",
166
+ "source": "hub"
167
  },
168
  "Analog Film": {
169
  "repo": "janekm/analog_film",
170
  "trigger": "fifthel",
171
+ "weights": "converted_complete.safetensors",
172
+ "source": "hub"
173
  }
174
  }
175
 
176
+ def scan_local_loras():
177
+ """
178
+ Сканирует папку /workspace/loras на наличие .safetensors файлов
179
+ Возвращает dict с найденными LoRA
180
+ """
181
+ local_loras = {}
182
+
183
+ if not os.path.exists(LOCAL_LORA_DIR):
184
+ logger.info(f" Local LoRA directory not found: {LOCAL_LORA_DIR}")
185
+ return local_loras
186
+
187
+ logger.info(f" Scanning local LoRA directory: {LOCAL_LORA_DIR}")
188
+
189
+ try:
190
+ for file in os.listdir(LOCAL_LORA_DIR):
191
+ if file.endswith('.safetensors'):
192
+ lora_name = os.path.splitext(file)[0] # Имя без расширения
193
+ local_path = os.path.join(LOCAL_LORA_DIR, file)
194
+
195
+ # Добавляем в список
196
+ local_loras[lora_name] = {
197
+ "path": local_path,
198
+ "trigger": "", # Без trigger word для локальных
199
+ "weights": file,
200
+ "source": "local"
201
+ }
202
+
203
+ logger.info(f" ✓ Found local LoRA: {lora_name} ({file})")
204
+
205
+ except Exception as e:
206
+ logger.warning(f" Error scanning local LoRA directory: {e}")
207
+
208
+ return local_loras
209
+
210
+ # Сканируем локальные LoRA
211
+ logger.info("\nScanning for LoRA models...")
212
+ LOCAL_LORAS = scan_local_loras()
213
+
214
+ # Объединяем Hub и локальные LoRA
215
+ AVAILABLE_LORAS = {**HUB_LORAS, **LOCAL_LORAS}
216
+
217
+ if LOCAL_LORAS:
218
+ logger.info(f" ✓ Found {len(LOCAL_LORAS)} local LoRA(s)")
219
+ logger.info(f" Total available LoRAs: {len(AVAILABLE_LORAS)}")
220
+
221
+ def load_lora_weights(pipeline, lora_name, lora_scale, hf_token):
222
+ """
223
+ Загружает LoRA веса в pipeline
224
+ Поддерживает как Hub LoRA так и локальные
225
+ """
226
+ if lora_name == "None" or lora_name not in AVAILABLE_LORAS:
227
+ return None
228
+
229
+ lora_info = AVAILABLE_LORAS[lora_name]
230
+
231
+ try:
232
+ if lora_info['source'] == 'hub':
233
+ # Загрузка с HuggingFace Hub
234
+ logger.info(f" Loading LoRA from Hub: {lora_info['repo']}")
235
+ pipeline.load_lora_weights(
236
+ lora_info['repo'],
237
+ weight_name=lora_info.get('weights', 'pytorch_lora_weights.safetensors'),
238
+ token=hf_token
239
+ )
240
+ else:
241
+ # Загрузка локального файла
242
+ logger.info(f" Loading local LoRA: {lora_info['path']}")
243
+ pipeline.load_lora_weights(
244
+ lora_info['path'],
245
+ adapter_name=lora_name
246
+ )
247
+
248
+ # Устанавливаем scale
249
+ if hasattr(pipeline, 'set_adapters'):
250
+ pipeline.set_adapters([lora_name], adapter_weights=[lora_scale])
251
+
252
+ return lora_info.get('trigger', '')
253
+
254
+ except Exception as e:
255
+ logger.error(f" ❌ Error loading LoRA {lora_name}: {e}")
256
+ return None
257
+
258
  # =================================================================
259
  # GENERATION FUNCTIONS
260
  # =================================================================
 
300
 
301
  try:
302
  # Загружаем LoRA если выбрана
303
+ trigger_word = None
304
+ if lora_name != "None":
305
+ trigger_word = load_lora_weights(pipe_txt2img, lora_name, lora_scale, hf_token)
 
 
 
 
 
 
306
 
307
+ # Добавляем trigger word если есть
308
+ if trigger_word:
309
+ prompt = trigger_word + prompt
310
+ logger.info(f" Added trigger: {trigger_word}")
311
 
312
  generator = torch.Generator(device=device).manual_seed(seed)
313
 
 
373
  raise gr.Error("Image2Image pipeline not available")
374
 
375
  # Загружаем LoRA если выбрана
376
+ trigger_word = None
377
+ if lora_name != "None":
378
+ trigger_word = load_lora_weights(pipe_img2img, lora_name, lora_scale, hf_token)
379
+
380
+ # Добавляем trigger word если есть
381
+ if trigger_word:
382
+ prompt = trigger_word + prompt
 
 
383
 
384
  generator = torch.Generator(device=device).manual_seed(seed)
385
 
 
421
  """
422
 
423
  with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
424
+ lora_choices = ["None"] + list(AVAILABLE_LORAS.keys())
425
+
426
+ gr.Markdown(f"""
427
  # 🎨 Qwen Soloband - Image2Image + LoRA
428
 
429
  **Продвинутая модель генерации** с поддержкой Text-to-Image, Image-to-Image и LoRA стилей.
 
431
  ### ✨ Возможности:
432
  - 🖼️ **Text-to-Image** - Генерация из текста, разрешения до 2048×2048
433
  - 🔄 **Image-to-Image** - Модификация изображений с контролем strength (0.0-1.0)
434
+ - 🎭 **LoRA Support** - {len(AVAILABLE_LORAS)} доступных стилей (Hub + локальные)
435
  - 🔌 **Full API** - Все функции доступны через API
436
  - ⚡ **Optimized** - VAE tiling/slicing, правильный QwenImageImg2ImgPipeline
437
 
438
  **Модель**: [Gerchegg/Qwen-Soloband-Diffusers](https://huggingface.co/Gerchegg/Qwen-Soloband-Diffusers)
439
+
440
+ 💡 **Local LoRAs**: Положите .safetensors файлы в `/workspace/loras/` - они появятся автоматически!
441
  """)
442
 
443
  with gr.Tabs() as tabs:
 
471
 
472
  t2i_lora = gr.Radio(
473
  label="LoRA Style",
474
+ choices=lora_choices,
475
+ value="None",
476
+ info=f"Hub: {len(HUB_LORAS)}, Local: {len(LOCAL_LORAS)}"
477
  )
478
  t2i_lora_scale = gr.Slider(label="LoRA Strength", minimum=0.0, maximum=2.0, step=0.1, value=1.0)
479
 
 
516
 
517
  i2i_lora = gr.Radio(
518
  label="LoRA Style",
519
+ choices=lora_choices,
520
+ value="None",
521
+ info=f"Hub: {len(HUB_LORAS)}, Local: {len(LOCAL_LORAS)}"
522
  )
523
  i2i_lora_scale = gr.Slider(label="LoRA Strength", minimum=0.0, maximum=2.0, step=0.1, value=1.0)
524