phxdev Claude committed on
Commit
b7dac13
·
1 Parent(s): ad723b4

Add multiple LoRAs with simultaneous loading

Browse files

- Add 5 LoRAs: AntiBlur, Add Details, Face Realism, Ultra Realism, Detailed Hands
- Implement automatic download from HuggingFace URLs
- Load all LoRAs simultaneously at startup for combined effects
- Update UI to show all LoRAs are active by default
- Remove LoRA selection dropdown in favor of all-active approach

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

Files changed (1) hide show
  1. app.py +59 -0
app.py CHANGED
@@ -6,6 +6,9 @@ import torch
6
  from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
7
  from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
8
  from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
 
 
 
9
 
10
  dtype = torch.bfloat16
11
  device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -13,6 +16,60 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
13
  taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
14
  good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
15
  pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  torch.cuda.empty_cache()
17
 
18
  MAX_SEED = np.iinfo(np.int32).max
@@ -75,6 +132,8 @@ with gr.Blocks(css=css) as demo:
75
 
76
  with gr.Accordion("Advanced Settings", open=False):
77
 
 
 
78
  seed = gr.Slider(
79
  label="Seed",
80
  minimum=0,
 
6
  from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, AutoencoderTiny, AutoencoderKL
7
  from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
8
  from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
9
+ from huggingface_hub import hf_hub_download
10
+ import os
11
+ import requests
12
 
# FLUX.1-dev weights are distributed in bfloat16; fall back to CPU when CUDA is absent.
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Tiny autoencoder — presumably used for fast intermediate/live-preview decodes
# (see live_preview_helpers import) — TODO confirm.
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
# Full-quality VAE, kept separately for the final decode.
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
# Main pipeline is built with the tiny VAE attached by default.
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
19
+
20
# Available LoRAs.
# Maps display name -> either a HuggingFace repo id (loadable directly by
# pipe.load_lora_weights) or a direct .safetensors download URL (must be
# downloaded to a local file first).
LORAS = {
    "None": None,  # sentinel entry: no LoRA
    "AntiBlur": "Shakker-Labs/FLUX.1-dev-LoRA-AntiBlur",
    "Add Details": "Shakker-Labs/FLUX.1-dev-LoRA-add-details",
    "Face Realism": "https://huggingface.co/its-magick/merlin-test-loras/resolve/main/Canopus-LoRA-Flux-FaceRealism.safetensors",
    "Ultra Realism": "https://huggingface.co/its-magick/merlin-test-loras/resolve/main/Canopus-LoRA-Flux-UltraRealism.safetensors",
    "Detailed Hands": "https://huggingface.co/its-magick/merlin-test-loras/resolve/main/Detailed_Hands-000001.safetensors"
}

# Store loaded LoRA paths: display name -> resolved local path or repo id.
# Populated at startup by preload_and_load_all_loras().
loaded_loras = {}
33
def download_lora_from_url(url, filename):
    """Download a LoRA weights file from a direct URL.

    Skips the download when ``filename`` already exists on disk, so
    repeated startups reuse the cached file.

    Args:
        url: Direct HTTP(S) URL to a .safetensors file.
        filename: Local path to save the file to.

    Returns:
        The local ``filename`` (whether freshly downloaded or cached).

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    if not os.path.exists(filename):
        print(f"Downloading {filename}...")
        # Stream to disk and fail loudly on HTTP errors; without
        # raise_for_status() a 404 HTML page would be silently written
        # out as the .safetensors file.
        response = requests.get(url, stream=True, timeout=60)
        response.raise_for_status()
        with open(filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1 << 20):
                f.write(chunk)
        print(f"Downloaded {filename}")
    return filename
43
def preload_and_load_all_loras():
    """Download and load every configured LoRA at startup.

    Resolves each entry in LORAS (downloading direct-URL entries to local
    files), records the resolved paths in ``loaded_loras``, loads each
    checkpoint as a named adapter, then activates and fuses all adapters
    so every LoRA is permanently applied to ``pipe``.
    """
    global loaded_loras

    print("Downloading and loading all LoRAs...")
    adapter_names = []

    for lora_name, lora_path in LORAS.items():
        # "None" is a sentinel entry, not a real LoRA.
        if lora_name == "None" or lora_path is None:
            continue

        # Direct URLs must be fetched to a local file first; repo ids can
        # be handed to load_lora_weights as-is.
        if lora_path.startswith('http'):
            filename = f"{lora_name.lower().replace(' ', '_')}_lora.safetensors"
            lora_path = download_lora_from_url(lora_path, filename)

        loaded_loras[lora_name] = lora_path
        print(f"Downloaded {lora_name}")

        # BUGFIX: load_lora_weights accepts a single checkpoint, not a
        # list — load each LoRA as its own named adapter instead of
        # passing the collected list in one call.
        adapter_name = lora_name.lower().replace(' ', '_')
        pipe.load_lora_weights(lora_path, adapter_name=adapter_name)
        adapter_names.append(adapter_name)

    if adapter_names:
        print("Loading all LoRAs simultaneously...")
        # Activate every adapter at equal weight, then fuse into the base
        # weights so inference needs no per-call adapter bookkeeping.
        pipe.set_adapters(adapter_names, adapter_weights=[1.0] * len(adapter_names))
        pipe.fuse_lora(lora_scale=1.0)
        print(f"All {len(adapter_names)} LoRAs loaded and active!")


# Load all LoRAs at startup so the combined effects apply to every generation.
preload_and_load_all_loras()
73
  torch.cuda.empty_cache()
74
 
75
  MAX_SEED = np.iinfo(np.int32).max
 
132
 
133
  with gr.Accordion("Advanced Settings", open=False):
134
 
135
+ gr.Markdown("**LoRAs Active:** All LoRAs are loaded and active simultaneously")
136
+
137
  seed = gr.Slider(
138
  label="Seed",
139
  minimum=0,