kayte0342 committed on
Commit
41ab360
·
verified ·
1 Parent(s): 158cf4d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -46
app.py CHANGED
@@ -160,21 +160,21 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
160
  return final_image
161
 
162
  @spaces.GPU(duration=70)
163
- def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
164
- print("Selected indices (raw):", selected_indices)
165
- # Then parse them if needed
166
- parsed_indices = parse_selected_indices(selected_indices)
167
- print("Parsed indices:", parsed_indices)
168
  # Ensure at least one LoRA is selected
169
  if not selected_indices or len(selected_indices) == 0:
170
  raise gr.Error("You must select at least one LoRA before proceeding.")
171
 
172
- # Combine trigger words from all selected LoRAs
173
  prompt_mash = prompt
174
  for idx in selected_indices:
175
  selected_lora = loras[idx]
176
  if "trigger_word" in selected_lora and selected_lora["trigger_word"]:
177
- # Prepend each trigger word to the prompt
178
  prompt_mash = f"{selected_lora['trigger_word']} {prompt_mash}"
179
 
180
  # Unload any previously loaded LoRA weights
@@ -184,14 +184,14 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
184
 
185
  # Load each selected LoRA weight sequentially
186
  with calculateDuration("Loading LoRA weights"):
187
- # Choose which pipeline to use based on whether an input image is provided
188
  pipe_to_use = pipe_i2i if image_input is not None else pipe
189
  for idx in selected_indices:
190
  selected_lora = loras[idx]
191
  weight_name = selected_lora.get("weights", None)
192
  pipe_to_use.load_lora_weights(
193
- selected_lora["repo"],
194
- weight_name=weight_name,
195
  low_cpu_mem_usage=True
196
  )
197
 
@@ -202,10 +202,16 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
202
 
203
  # Generate image(s)
204
  if image_input is not None:
205
- final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed)
 
 
 
206
  yield final_image, seed, gr.update(visible=False)
207
  else:
208
- image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
 
 
 
209
  final_image = None
210
  step_counter = 0
211
  for image in image_generator:
@@ -215,6 +221,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
215
  yield image, seed, gr.update(value=progress_bar, visible=True)
216
  yield final_image, seed, gr.update(value=progress_bar, visible=False)
217
 
 
218
 
219
  def get_huggingface_safetensors(link):
220
  split_link = link.split("/")
@@ -310,38 +317,40 @@ css = '''
310
  .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
311
  '''
312
  font=[gr.themes.GoogleFont("Source Sans Pro"), "Arial", "sans-serif"]
 
313
  with gr.Blocks(theme=gr.themes.Soft(font=font), css=css, delete_cache=(60, 60)) as app:
314
  title = gr.HTML(
315
  """<h1><img src="https://huggingface.co/spaces/kayte0342/test/resolve/main/DA4BE61E-A0BD-4254-A1B6-AD3C05D18A9C%20(1).png?download=true" alt="LoRA"> FLUX LoRA Kayte's Space</h1>""",
316
  elem_id="title",
317
  )
318
- selected_index = gr.State(None)
 
 
 
319
  with gr.Row():
320
  with gr.Column(scale=3):
321
  prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
322
  with gr.Column(scale=1, elem_id="gen_column"):
323
  generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
 
324
  with gr.Row():
325
  with gr.Column():
326
  selected_info = gr.Markdown("")
327
- gallery = gr.Gallery(
328
- [(item["image"], item["title"]) for item in loras],
329
- label="LoRA Gallery",
330
- #select_mode="multiple", # Ensure multi-selection is enabled
331
- allow_preview=False,
332
- columns=3,
333
- elem_id="gallery",
334
- show_share_button=False
335
- )
336
- with gr.Group():
337
- custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path", placeholder="multimodalart/vintage-ads-flux")
338
- gr.Markdown("[Check the list of FLUX LoRas](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
339
- custom_lora_info = gr.HTML(visible=False)
340
- custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
341
  with gr.Column():
342
- progress_bar = gr.Markdown(elem_id="progress",visible=False)
343
  result = gr.Image(label="Generated Image")
344
-
345
  with gr.Row():
346
  with gr.Accordion("Advanced Settings", open=False):
347
  with gr.Row():
@@ -351,34 +360,52 @@ with gr.Blocks(theme=gr.themes.Soft(font=font), css=css, delete_cache=(60, 60))
351
  with gr.Row():
352
  cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
353
  steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
354
-
355
  with gr.Row():
356
  width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
357
  height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
358
-
359
  with gr.Row():
360
  randomize_seed = gr.Checkbox(True, label="Randomize seed")
361
- seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
362
  lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=3, step=0.01, value=0.95)
363
-
364
- gallery.select(
365
- update_selection,
366
- inputs=[width, height],
367
- outputs=[prompt, selected_info, selected_index, width, height]
 
 
 
 
 
 
368
  )
369
- custom_lora.input(
370
- add_custom_lora,
371
- inputs=[custom_lora],
372
- outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
 
 
 
 
 
 
 
 
 
373
  )
374
- custom_lora_button.click(
375
- remove_custom_lora,
376
- outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
 
 
 
377
  )
 
 
378
  gr.on(
379
  triggers=[generate_button.click, prompt.submit],
380
- fn=run_lora,
381
- inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
382
  outputs=[result, seed, progress_bar]
383
  )
384
 
 
160
  return final_image
161
 
162
  @spaces.GPU(duration=70)
163
+ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices_json, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
164
+ import json
165
+ # Parse the JSON string into a list of indices
166
+ selected_indices = json.loads(selected_indices_json)
167
+
168
  # Ensure at least one LoRA is selected
169
  if not selected_indices or len(selected_indices) == 0:
170
  raise gr.Error("You must select at least one LoRA before proceeding.")
171
 
172
+ # Combine trigger words from all selected LoRAs into the prompt
173
  prompt_mash = prompt
174
  for idx in selected_indices:
175
  selected_lora = loras[idx]
176
  if "trigger_word" in selected_lora and selected_lora["trigger_word"]:
177
+ # Prepend each trigger word to the prompt; you can adjust the order or separator as needed
178
  prompt_mash = f"{selected_lora['trigger_word']} {prompt_mash}"
179
 
180
  # Unload any previously loaded LoRA weights
 
184
 
185
  # Load each selected LoRA weight sequentially
186
  with calculateDuration("Loading LoRA weights"):
187
+ # Use the image-to-image pipeline if an input image is provided, else the text-to-image pipeline
188
  pipe_to_use = pipe_i2i if image_input is not None else pipe
189
  for idx in selected_indices:
190
  selected_lora = loras[idx]
191
  weight_name = selected_lora.get("weights", None)
192
  pipe_to_use.load_lora_weights(
193
+ selected_lora["repo"],
194
+ weight_name=weight_name,
195
  low_cpu_mem_usage=True
196
  )
197
 
 
202
 
203
  # Generate image(s)
204
  if image_input is not None:
205
+ # Image-to-image generation
206
+ final_image = generate_image_to_image(
207
+ prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed
208
+ )
209
  yield final_image, seed, gr.update(visible=False)
210
  else:
211
+ # Text-to-image generation
212
+ image_generator = generate_image(
213
+ prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress
214
+ )
215
  final_image = None
216
  step_counter = 0
217
  for image in image_generator:
 
221
  yield image, seed, gr.update(value=progress_bar, visible=True)
222
  yield final_image, seed, gr.update(value=progress_bar, visible=False)
223
 
224
+
225
 
226
  def get_huggingface_safetensors(link):
227
  split_link = link.split("/")
 
317
  .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
318
  '''
319
  font=[gr.themes.GoogleFont("Source Sans Pro"), "Arial", "sans-serif"]
320
+ # Begin Gradio Blocks
321
  with gr.Blocks(theme=gr.themes.Soft(font=font), css=css, delete_cache=(60, 60)) as app:
322
  title = gr.HTML(
323
  """<h1><img src="https://huggingface.co/spaces/kayte0342/test/resolve/main/DA4BE61E-A0BD-4254-A1B6-AD3C05D18A9C%20(1).png?download=true" alt="LoRA"> FLUX LoRA Kayte's Space</h1>""",
324
  elem_id="title",
325
  )
326
+
327
+ # Hidden textbox to store the JSON string of selected indices
328
+ selected_indices_hidden = gr.Textbox(value="[]", visible=False)
329
+
330
  with gr.Row():
331
  with gr.Column(scale=3):
332
  prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
333
  with gr.Column(scale=1, elem_id="gen_column"):
334
  generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
335
+
336
  with gr.Row():
337
  with gr.Column():
338
  selected_info = gr.Markdown("")
339
+ # Create a custom container for LoRA selection with checkboxes
340
+ lora_selection_container = gr.Column()
341
+ # We'll collect individual checkbox components in a list for later use
342
+ lora_checkbox_list = []
343
+ for idx, lora in enumerate(loras):
344
+ with gr.Row():
345
+ gr.Image(value=lora["image"], label=lora["title"], height=100)
346
+ checkbox = gr.Checkbox(label="Select", value=False, elem_id=f"lora_checkbox_{idx}")
347
+ lora_checkbox_list.append(checkbox)
348
+ # Add a hidden update button (invisible) to update the hidden state; it can be triggered programmatically.
349
+ update_selection_btn = gr.Button("Update LoRA Selection", visible=False)
 
 
 
350
  with gr.Column():
351
+ progress_bar = gr.Markdown(elem_id="progress", visible=False)
352
  result = gr.Image(label="Generated Image")
353
+
354
  with gr.Row():
355
  with gr.Accordion("Advanced Settings", open=False):
356
  with gr.Row():
 
360
  with gr.Row():
361
  cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
362
  steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
 
363
  with gr.Row():
364
  width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
365
  height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
 
366
  with gr.Row():
367
  randomize_seed = gr.Checkbox(True, label="Randomize seed")
368
+ seed = gr.Slider(label="Seed", minimum=0, maximum=2**32-1, step=1, value=0, randomize=True)
369
  lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=3, step=0.01, value=0.95)
370
+
371
+ # Function to combine checkbox states into a JSON list of selected indices.
372
+ def combine_selections(*checkbox_values):
373
+ selected_indices = [i for i, v in enumerate(checkbox_values) if v]
374
+ return json.dumps(selected_indices)
375
+
376
+ # When the (invisible) update button is clicked, update the hidden state.
377
+ update_selection_btn.click(
378
+ combine_selections,
379
+ inputs=lora_checkbox_list,
380
+ outputs=selected_indices_hidden
381
  )
382
+
383
+ # Also update the selected_info Markdown when the hidden state changes.
384
+ def update_info(selected_json):
385
+ selected_indices = json.loads(selected_json)
386
+ if selected_indices:
387
+ info = "Selected LoRAs: " + ", ".join([loras[i]["title"] for i in selected_indices])
388
+ else:
389
+ info = "No LoRAs selected."
390
+ return info
391
+ selected_info.change(
392
+ update_info,
393
+ inputs=selected_indices_hidden,
394
+ outputs=selected_info
395
  )
396
+
397
+ # Also, when the Generate button is clicked, update the hidden state from the checkboxes.
398
+ generate_button.click(
399
+ combine_selections,
400
+ inputs=lora_checkbox_list,
401
+ outputs=selected_indices_hidden
402
  )
403
+
404
+ # Finally, trigger the generation function (run_lora). Note that run_lora should be modified to parse the JSON string.
405
  gr.on(
406
  triggers=[generate_button.click, prompt.submit],
407
+ fn=run_lora, # Make sure run_lora begins by parsing the JSON: selected_indices = json.loads(selected_indices_hidden)
408
+ inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices_hidden, randomize_seed, seed, width, height, lora_scale],
409
  outputs=[result, seed, progress_bar]
410
  )
411