Update app.py
app.py CHANGED
@@ -31,6 +31,7 @@ import diffusers
 # init
 dtype = torch.bfloat16
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+print(device)
 base_model = "black-forest-labs/FLUX.1-dev"
 
 # load pipe
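The pipeline construction itself is not part of this diff (the hunk ends at the `# load pipe` comment). A minimal sketch of what that section could look like for this base model, assuming the stock diffusers classes `FluxPipeline` and `FluxImg2ImgPipeline`; only `dtype`, `device`, `base_model`, and the `txt2img_pipe` / `img2img_pipe` names are taken from the diff:

```python
# Sketch only: the "# load pipe" section is not shown in this diff.
# Assumes the stock diffusers FLUX pipelines; variable names follow the diff.
import torch
from diffusers import FluxPipeline, FluxImg2ImgPipeline

dtype = torch.bfloat16
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

base_model = "black-forest-labs/FLUX.1-dev"

# Text-to-image pipeline in bfloat16
txt2img_pipe = FluxPipeline.from_pretrained(base_model, torch_dtype=dtype)

# Image-to-image pipeline reusing the same components,
# so the checkpoint is only loaded once
img2img_pipe = FluxImg2ImgPipeline.from_pipe(txt2img_pipe)
```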
@@ -116,12 +117,16 @@ def run_lora(prompt, image_url, lora_strings_json, image_strength, cfg_scale, s
     gr.Info("Starting process")
     img2img_model = False
     orginal_image = None
-
-
+    print(device)
+    if image_url and image_url != "":
+        print("img2img")
+        orginal_image = load_image(image_url).to(device)
         img2img_model = True
         img2img_pipe.to(device)
     else:
+        print("txt2img")
         txt2img_pipe.to(device)
+
     # Set random seed for reproducibility
     if randomize_seed:
         with calculateDuration("Set random seed"):
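Note on the new img2img branch: if `load_image` here is `diffusers.utils.load_image` (an assumption; it could be a local helper), it returns a `PIL.Image.Image`, and PIL images have no `.to()` method, so `load_image(image_url).to(device)` raises `AttributeError` as soon as an image URL is supplied. A sketch of the branch that keeps the image as a PIL object and only moves the pipeline, reusing the names defined above:

```python
# Assumes load_image is diffusers.utils.load_image, which returns a
# PIL.Image.Image; PIL images cannot be moved to a device, so no .to() here.
from diffusers.utils import load_image

orginal_image = None
img2img_model = False

if image_url and image_url != "":
    print("img2img")
    orginal_image = load_image(image_url)  # keep as PIL; the pipeline handles placement
    img2img_model = True
    img2img_pipe.to(device)                # move the pipeline, not the image
else:
    print("txt2img")
    txt2img_pipe.to(device)
```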
@@ -130,7 +135,7 @@ def run_lora(prompt, image_url, lora_strings_json, image_strength, cfg_scale, s
     # Load LoRA weights
     gr.Info("Start to load LoRA ...")
     with calculateDuration("Unloading LoRA"):
-        img2img_pipe.unload_lora_weights()
+        # img2img_pipe.unload_lora_weights()
         txt2img_pipe.unload_lora_weights()
 
     lora_configs = None
@@ -162,7 +167,6 @@ def run_lora(prompt, image_url, lora_strings_json, image_strength, cfg_scale, s
     try:
         if img2img_model:
             img2img_pipe.load_lora_weights(lora_repo, weight_name=weights, low_cpu_mem_usage=True, adapter_name=lora_name)
-            img2img_pipe.set
         else:
             txt2img_pipe.load_lora_weights(lora_repo, weight_name=weights, low_cpu_mem_usage=True, adapter_name=lora_name)
     except:
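The removed `img2img_pipe.set` line reads like the truncated start of an adapter-activation call. In diffusers, LoRAs loaded with `adapter_name` are typically activated with `set_adapters`; a hypothetical version of this block, which also narrows the bare `except:` so load failures are visible:

```python
# Hypothetical sketch: set_adapters is the usual diffusers call for activating
# a LoRA loaded under adapter_name; the original truncated line is a guess.
try:
    if img2img_model:
        img2img_pipe.load_lora_weights(
            lora_repo, weight_name=weights,
            low_cpu_mem_usage=True, adapter_name=lora_name,
        )
        img2img_pipe.set_adapters([lora_name], adapter_weights=[1.0])
    else:
        txt2img_pipe.load_lora_weights(
            lora_repo, weight_name=weights,
            low_cpu_mem_usage=True, adapter_name=lora_name,
        )
        txt2img_pipe.set_adapters([lora_name], adapter_weights=[1.0])
except Exception as e:  # a bare "except:" would swallow the actual error
    gr.Warning(f"Failed to load LoRA from {lora_repo}: {e}")
```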
@@ -187,6 +191,7 @@ def run_lora(prompt, image_url, lora_strings_json, image_strength, cfg_scale, s
     joint_attention_kwargs = {"scale": 1}
 
     if orginal_image:
+        img2img_pipe.to(device)
         final_image = img2img_pipe(
             prompt=prompt,
             image=orginal_image,
@@ -199,6 +204,7 @@ def run_lora(prompt, image_url, lora_strings_json, image_strength, cfg_scale, s
             joint_attention_kwargs=joint_attention_kwargs
         ).images[0]
     else:
+        txt2img_pipe.to(device)
         final_image = txt2img_pipe(
             prompt=prompt,
             num_inference_steps=steps,
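With the two `.to(device)` additions, whichever pipeline is about to run is moved to the GPU just before the call. A sketch of how the two branches then dispatch generation; the strength, guidance, and generator wiring is assumed from the `run_lora(...)` signature and is not shown in these hunks:

```python
# Sketch of the final dispatch; parameter wiring for strength, cfg_scale, and
# seed is an assumption based on the run_lora(...) arguments.
generator = torch.Generator(device=device).manual_seed(seed)
joint_attention_kwargs = {"scale": 1}

if orginal_image:
    img2img_pipe.to(device)
    final_image = img2img_pipe(
        prompt=prompt,
        image=orginal_image,
        strength=image_strength,
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        generator=generator,
        joint_attention_kwargs=joint_attention_kwargs,
    ).images[0]
else:
    txt2img_pipe.to(device)
    final_image = txt2img_pipe(
        prompt=prompt,
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        generator=generator,
        joint_attention_kwargs=joint_attention_kwargs,
    ).images[0]
```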