oyly committed
Commit be4b2be · 1 parent: ff5529d

reduce gpu usage

Files changed:
- app.py (+4 -3)
- flux/sampling_lore.py (+2 -8)
app.py
CHANGED

@@ -34,7 +34,7 @@ offload = False
 
     name = model_name
     is_schnell = model_name == "flux-schnell"
-    resize_longside =
+    resize_longside = 480
     save = False
     output_dir = 'outputs_gradio'
 
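Capping the longest image side at 480 bounds the latent sequence length, which is the main driver of attention memory here. The app's own resize helper is not part of this diff; a hypothetical equivalent, just to illustrate the longest-side cap:

```python
from PIL import Image

def cap_longside(img: Image.Image, longside: int = 480) -> Image.Image:
    # Hypothetical helper (not from this repo): scale so the longer edge
    # is at most `longside`, preserving the aspect ratio.
    w, h = img.size
    if max(w, h) <= longside:
        return img
    scale = longside / max(w, h)
    return img.resize((round(w * scale), round(h * scale)), Image.LANCZOS)
```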
@@ -189,6 +189,7 @@ def edit(brush_canvas, source_prompt, inversion_guidance,
     if training_epochs != 0:
         t5.to('cpu')
         clip.to('cpu')
+        ae.to('cpu')
         torch.set_grad_enabled(True)
         inp_optim["img"] = z0
         _, info, _, _, trainable_noise_list = denoise_with_noise_optim(model,**inp_optim,token_ids=token_ids,source_mask=source_mask,training_steps=1,training_epochs=training_epochs,learning_rate=0.01,seed=seed,noise_scale=noise_scale,timesteps=timesteps,info=info,guidance=denoise_guidance)
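The new `ae.to('cpu')` extends the existing offload pattern: during the noise optimization only the flow model does work on the GPU, so the T5 and CLIP encoders and, with this commit, the autoencoder are parked on CPU first. A minimal sketch of the idea, using the same names as the diff (the `empty_cache` call is an optional extra, not part of this commit):

```python
import torch

def free_vram_for_noise_optim(t5, clip, ae):
    # Park everything the optimization loop does not touch on the CPU,
    # leaving VRAM for the flow model's activations and gradients.
    for module in (t5, clip, ae):
        module.to("cpu")
    torch.cuda.empty_cache()  # optional: return cached blocks to the allocator
```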
@@ -208,7 +209,7 @@ def edit(brush_canvas, source_prompt, inversion_guidance,
 
     # decode latents to pixel space
     batch_x = unpack(x.float(), width,height)
-
+    ae.to(device)
     for x in batch_x:
         x = x.unsqueeze(0)
 
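The counterpart to the offload above: the autoencoder returns to the GPU only at the moment it is needed, just before decoding. A rough sketch (hypothetical; it assumes the autoencoder exposes a `decode` method as in the FLUX reference code, and that no gradients are needed at this point):

```python
import torch

def decode_on_gpu(ae, batch_x, device):
    # Hypothetical decode loop mirroring the diff: move ae back to the GPU,
    # then decode each latent without building an autograd graph.
    ae.to(device)
    with torch.no_grad():
        return [ae.decode(x.unsqueeze(0).to(device)) for x in batch_x]
```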
@@ -276,7 +277,7 @@ def create_demo(model_name: str):
 
     🎨 [<b>Examples</b>] Click our examples below, draw your mask and click the "Edit" button. <br>
 
-    🔔 [<b>Note</b>] Due to limited resources in spaces, we will resize image to <=
+    🔔 [<b>Note</b>] Due to limited resources in spaces, we will resize image to <=480 longside. <br>
     If you need high resolution for better quality, go to https://github.com/oyly16/LORE for more usage with your own resource. <br>
     """
     article = r"""
flux/sampling_lore.py
CHANGED

@@ -156,7 +156,6 @@ def denoise(
     guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
 
     step_list = []
-    attn_map_list = []
     for i, (t_curr, t_prev) in enumerate(zip(timesteps[:-1], timesteps[1:])):
         t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
         info['t'] = t_prev if inverse else t_curr
@@ -200,10 +199,8 @@ def denoise(
 
         # return attnmaps L,1,512,N
         step_list.append(t_curr)
-        attn_map_list.append((attn_maps_mid+attn_maps)/2)
 
-
-    return img, info, step_list, attn_map_list
+    return img, info, step_list, None
 
 selected_layers = range(8,44)
 
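Returning None keeps the four-tuple signature, so call sites that unpack four values need no changes, while the per-step maps of shape L,1,512,N are never retained. A back-of-envelope estimate of what that retention used to cost (assumptions, not from the diff: float32, 36 layers from `selected_layers = range(8,44)`, 512 text tokens, 30·30 = 900 packed image tokens for a 480 px square input):

```python
# Rough estimate only; dtype and token counts are assumptions.
layers, text_tokens, img_tokens, bytes_per_el = 36, 512, 30 * 30, 4
per_step = layers * 1 * text_tokens * img_tokens * bytes_per_el
print(f"~{per_step / 2**20:.0f} MiB kept alive per denoising step")  # ~63 MiB
```

Across a few dozen denoising steps that adds up to memory on the order of gigabytes that the GPU no longer has to hold.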
@@ -276,7 +273,6 @@ def denoise_with_noise_optim(
     guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
 
     step_list = []
-    attn_map_list = []
     trainable_noise_list = []
     for i, (t_curr, t_prev) in enumerate(zip(timesteps[:-1], timesteps[1:])):
         if i >= training_steps:
@@ -353,13 +349,11 @@ def denoise_with_noise_optim(
             optimizer.step()
             print(f"Time {t_curr:.4f} Step {j+1}/{training_epochs}, Loss: {total_loss.item():.6f}")
 
-        attn_map_list.append(attn_maps.detach())
         step_list.append(t_curr)
         trainable_noise = trainable_noise.detach()
         trainable_noise_list.append(trainable_noise.clone())
 
-
-    return img, info, step_list, attn_map_list, trainable_noise_list
+    return img, info, step_list, None, trainable_noise_list
 
 def unpack(x: Tensor, height: int, width: int) -> Tensor:
     return rearrange(
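The optimizer path keeps its `trainable_noise.detach()` followed by `.clone()` before storing, which is what stops the stored snapshots from pinning each step's autograd graph; dropping `attn_map_list` removes the last per-step accumulation, so memory stays flat across steps. A self-contained illustration of that detach-before-store pattern (toy example, not code from this repo):

```python
import torch

stored = []
x = torch.randn(4, requires_grad=True)
for _ in range(3):
    loss = (x * x).sum()
    loss.backward()                  # builds a graph for this step
    with torch.no_grad():
        x -= 0.1 * x.grad            # toy update
        x.grad = None
    # detach() cuts the graph so the snapshot does not keep it alive;
    # clone() decouples the snapshot from later in-place updates to x.
    stored.append(x.detach().clone())
```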