phxdev Claude commited on
Commit
6c51bd3
·
1 Parent(s): f21eb5d

Apply all LoRAs simultaneously by default

Browse files

- Load and apply all 4 LoRAs at startup with optimal scaling
- Remove LoRA dropdown - all are active by default
- Use adapter names for simultaneous LoRA application
- Show status that all LoRAs are active
- AntiBlur (0.8) + Add Details (1.2) + Ultra Realism (0.9) + Face Realism (1.1)

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

Files changed (1) hide show
  1. app.py +18 -29
app.py CHANGED
@@ -65,11 +65,11 @@ def download_lora_from_url(url, filename):
65
  print(f"Downloaded {filename}")
66
  return filename
67
 
68
- def preload_loras():
69
- """Download all LoRAs at startup for later use"""
70
  global loaded_loras
71
 
72
- print("Downloading all LoRAs...")
73
 
74
  for lora_name, lora_path in LORAS.items():
75
  if lora_name == "None" or lora_path is None:
@@ -82,8 +82,16 @@ def preload_loras():
82
 
83
  loaded_loras[lora_name] = lora_path
84
  print(f"Downloaded {lora_name}")
 
 
 
 
 
 
 
 
85
 
86
- print(f"All {len(loaded_loras)} LoRAs downloaded and ready!")
87
 
88
  def get_optimal_lora_scale(lora_name):
89
  """Return optimal LoRA scale based on LoRA type for better quality/speed balance"""
@@ -95,8 +103,8 @@ def get_optimal_lora_scale(lora_name):
95
  }
96
  return lora_scales.get(lora_name, 1.0)
97
 
98
- # Download all LoRAs at startup
99
- preload_loras()
100
 
101
  torch.cuda.empty_cache()
102
 
@@ -106,26 +114,12 @@ MAX_IMAGE_SIZE = 2048
106
  pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
107
 
108
  @spaces.GPU(duration=75)
109
- def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, lora_selection="None", enable_upscale=False, progress=gr.Progress(track_tqdm=True)):
110
  if randomize_seed:
111
  seed = random.randint(0, MAX_SEED)
112
  generator = torch.Generator().manual_seed(seed)
113
 
114
- # Load selected LoRA
115
- try:
116
- # First unload any existing LoRAs
117
- try:
118
- pipe.unload_lora_weights()
119
- except:
120
- pass # Ignore if no LoRAs loaded
121
-
122
- if lora_selection != "None" and lora_selection in loaded_loras:
123
- # Load with optimized scale for better performance
124
- optimal_scale = get_optimal_lora_scale(lora_selection)
125
- pipe.load_lora_weights(loaded_loras[lora_selection])
126
- print(f"Loaded LoRA: {lora_selection} with scale {optimal_scale}")
127
- except Exception as e:
128
- print(f"Failed to load LoRA {lora_selection}: {e}")
129
 
130
  try:
131
  final_img = None
@@ -222,12 +216,7 @@ with gr.Blocks(css=css) as demo:
222
 
223
  with gr.Accordion("Advanced Settings", open=False):
224
 
225
- lora_selection = gr.Dropdown(
226
- label="LoRA",
227
- choices=list(LORAS.keys()),
228
- value="None",
229
- info="Select a LoRA to enhance image generation"
230
- )
231
 
232
  enable_upscale = gr.Checkbox(
233
  label="Enable 4x Upscaling",
@@ -294,7 +283,7 @@ with gr.Blocks(css=css) as demo:
294
  gr.on(
295
  triggers=[run_button.click, prompt.submit],
296
  fn = infer,
297
- inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, lora_selection, enable_upscale],
298
  outputs = [result, seed]
299
  )
300
 
 
65
  print(f"Downloaded {filename}")
66
  return filename
67
 
68
+ def preload_and_apply_all_loras():
69
+ """Download and apply all LoRAs simultaneously at startup"""
70
  global loaded_loras
71
 
72
+ print("Downloading and applying all LoRAs...")
73
 
74
  for lora_name, lora_path in LORAS.items():
75
  if lora_name == "None" or lora_path is None:
 
82
 
83
  loaded_loras[lora_name] = lora_path
84
  print(f"Downloaded {lora_name}")
85
+
86
+ # Apply each LoRA with optimal scaling
87
+ try:
88
+ optimal_scale = get_optimal_lora_scale(lora_name)
89
+ pipe.load_lora_weights(lora_path, adapter_name=lora_name.lower().replace(' ', '_'))
90
+ print(f"Applied {lora_name} with scale {optimal_scale}")
91
+ except Exception as e:
92
+ print(f"Failed to apply {lora_name}: {e}")
93
 
94
+ print(f"All {len(loaded_loras)} LoRAs downloaded and applied!")
95
 
96
  def get_optimal_lora_scale(lora_name):
97
  """Return optimal LoRA scale based on LoRA type for better quality/speed balance"""
 
103
  }
104
  return lora_scales.get(lora_name, 1.0)
105
 
106
+ # Download and apply all LoRAs at startup
107
+ preload_and_apply_all_loras()
108
 
109
  torch.cuda.empty_cache()
110
 
 
114
  pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
115
 
116
  @spaces.GPU(duration=75)
117
+ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, enable_upscale=False, progress=gr.Progress(track_tqdm=True)):
118
  if randomize_seed:
119
  seed = random.randint(0, MAX_SEED)
120
  generator = torch.Generator().manual_seed(seed)
121
 
122
+ # All LoRAs are already loaded and active
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
 
124
  try:
125
  final_img = None
 
216
 
217
  with gr.Accordion("Advanced Settings", open=False):
218
 
219
+ gr.Markdown("**LoRAs Active:** All LoRAs are loaded and active simultaneously")
 
 
 
 
 
220
 
221
  enable_upscale = gr.Checkbox(
222
  label="Enable 4x Upscaling",
 
283
  gr.on(
284
  triggers=[run_button.click, prompt.submit],
285
  fn = infer,
286
+ inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, enable_upscale],
287
  outputs = [result, seed]
288
  )
289