pva22
committed on
Commit
·
e1a4a2a
1
Parent(s):
231b559
remove lora_scale param
Browse files- app.py +1 -2
- methods.py +1 -6
app.py
CHANGED
|
@@ -165,8 +165,7 @@ with gr.Blocks() as demo:
|
|
| 165 |
|
| 166 |
use_advanced_ip,
|
| 167 |
ip_adapter_scale,
|
| 168 |
-
image_upload_ip
|
| 169 |
-
lora_scale
|
| 170 |
],
|
| 171 |
outputs=[result, seed],
|
| 172 |
)
|
|
|
|
| 165 |
|
| 166 |
use_advanced_ip,
|
| 167 |
ip_adapter_scale,
|
| 168 |
+
image_upload_ip
|
|
|
|
| 169 |
],
|
| 170 |
outputs=[result, seed],
|
| 171 |
)
|
methods.py
CHANGED
|
@@ -131,8 +131,7 @@ def infer(
|
|
| 131 |
use_advanced_ip=False,
|
| 132 |
ip_adapter_scale=None,
|
| 133 |
image_upload_ip=None,
|
| 134 |
-
|
| 135 |
-
|
| 136 |
model_lora_id=model_lora_default,
|
| 137 |
progress=gr.Progress(track_tqdm=True),
|
| 138 |
dtype=torch.float16,
|
|
@@ -154,7 +153,6 @@ def infer(
|
|
| 154 |
|
| 155 |
image = pipe(prompt,
|
| 156 |
num_inference_steps=num_inference_steps,
|
| 157 |
-
lora_scale=lora_scale,
|
| 158 |
guidance_scale=guidance_scale,
|
| 159 |
negative_prompt=negative_prompt,
|
| 160 |
width=width,
|
|
@@ -175,7 +173,6 @@ def infer(
|
|
| 175 |
image = pipe(prompt,
|
| 176 |
edges,
|
| 177 |
num_inference_steps = num_inference_steps,
|
| 178 |
-
lora_scale=lora_scale,
|
| 179 |
controlnet_conditioning_scale=control_strength,
|
| 180 |
negative_prompt=negative_prompt,
|
| 181 |
generator=generator).images[0]
|
|
@@ -191,7 +188,6 @@ def infer(
|
|
| 191 |
image = pipe(
|
| 192 |
prompt,
|
| 193 |
ip_adapter_image=image_upload_ip,
|
| 194 |
-
lora_scale=lora_scale,
|
| 195 |
num_inference_steps=num_inference_steps,
|
| 196 |
guidance_scale=guidance_scale,
|
| 197 |
generator=generator).images[0]
|
|
@@ -213,7 +209,6 @@ def infer(
|
|
| 213 |
image = pipe(prompt,
|
| 214 |
edges,
|
| 215 |
ip_adapter_image=image_upload_ip,
|
| 216 |
-
lora_scale=lora_scale,
|
| 217 |
num_inference_steps=num_inference_steps,
|
| 218 |
guidance_scale=guidance_scale,
|
| 219 |
controlnet_conditioning_scale=control_strength,
|
|
|
|
| 131 |
use_advanced_ip=False,
|
| 132 |
ip_adapter_scale=None,
|
| 133 |
image_upload_ip=None,
|
| 134 |
+
|
|
|
|
| 135 |
model_lora_id=model_lora_default,
|
| 136 |
progress=gr.Progress(track_tqdm=True),
|
| 137 |
dtype=torch.float16,
|
|
|
|
| 153 |
|
| 154 |
image = pipe(prompt,
|
| 155 |
num_inference_steps=num_inference_steps,
|
|
|
|
| 156 |
guidance_scale=guidance_scale,
|
| 157 |
negative_prompt=negative_prompt,
|
| 158 |
width=width,
|
|
|
|
| 173 |
image = pipe(prompt,
|
| 174 |
edges,
|
| 175 |
num_inference_steps = num_inference_steps,
|
|
|
|
| 176 |
controlnet_conditioning_scale=control_strength,
|
| 177 |
negative_prompt=negative_prompt,
|
| 178 |
generator=generator).images[0]
|
|
|
|
| 188 |
image = pipe(
|
| 189 |
prompt,
|
| 190 |
ip_adapter_image=image_upload_ip,
|
|
|
|
| 191 |
num_inference_steps=num_inference_steps,
|
| 192 |
guidance_scale=guidance_scale,
|
| 193 |
generator=generator).images[0]
|
|
|
|
| 209 |
image = pipe(prompt,
|
| 210 |
edges,
|
| 211 |
ip_adapter_image=image_upload_ip,
|
|
|
|
| 212 |
num_inference_steps=num_inference_steps,
|
| 213 |
guidance_scale=guidance_scale,
|
| 214 |
controlnet_conditioning_scale=control_strength,
|