Spaces:
Running
on
A10G
Running
on
A10G
try fix OOM with HF GPU
Browse files- app.py +4 -2
- src/editor.py +4 -4
app.py
CHANGED
|
@@ -44,8 +44,10 @@ with gr.Blocks(css="style.css") as demo:
|
|
| 44 |
editor_state = gr.State()
|
| 45 |
|
| 46 |
@spaces.GPU
|
| 47 |
-
def set_pipe(input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
|
| 48 |
num_inversion_steps=4, inversion_max_step=0.6, rnri_iterations=2, rnri_alpha=0.1, rnri_lr=0.2):
|
|
|
|
|
|
|
| 49 |
scheduler_class = MyEulerAncestralDiscreteScheduler
|
| 50 |
|
| 51 |
print('\n################## 1')
|
|
@@ -172,7 +174,7 @@ with gr.Blocks(css="style.css") as demo:
|
|
| 172 |
|
| 173 |
set_button.click(
    fn=set_pipe,
    # NOTE(review): `num_inference_steps` is listed twice below. set_pipe's
    # signature is (..., num_inference_steps=4, num_inversion_steps=4,
    # inversion_max_step=0.6, ...), so the second occurrence feeds the
    # `num_inversion_steps` parameter. If a separate `num_inversion_steps`
    # component exists in the UI this is likely a typo — confirm; if the two
    # step counts are deliberately tied to one slider, document that instead.
    inputs=[input_image, description_prompt, edit_guidance_scale, num_inference_steps,
            num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
    outputs=[editor_state, is_set_text],
)
|
|
|
|
| 44 |
editor_state = gr.State()
|
| 45 |
|
| 46 |
@spaces.GPU
|
| 47 |
+
def set_pipe(image_editor, input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
|
| 48 |
num_inversion_steps=4, inversion_max_step=0.6, rnri_iterations=2, rnri_alpha=0.1, rnri_lr=0.2):
|
| 49 |
+
if image_editor is not None:
|
| 50 |
+
image_editor = image_editor.to('cpu')
|
| 51 |
scheduler_class = MyEulerAncestralDiscreteScheduler
|
| 52 |
|
| 53 |
print('\n################## 1')
|
|
|
|
| 174 |
|
| 175 |
set_button.click(
    fn=set_pipe,
    # `editor_state` is now passed first so set_pipe can move any previously
    # created editor back to CPU before building a new one (OOM mitigation).
    # NOTE(review): `num_inference_steps` is still listed twice below. Given
    # set_pipe's signature (..., num_inference_steps=4, num_inversion_steps=4,
    # inversion_max_step=0.6, ...), the second occurrence feeds
    # `num_inversion_steps`. Likely a typo for a `num_inversion_steps`
    # component — confirm against the UI definitions.
    inputs=[editor_state, input_image, description_prompt, edit_guidance_scale, num_inference_steps,
            num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
    outputs=[editor_state, is_set_text],
)
|
src/editor.py
CHANGED
|
@@ -91,10 +91,10 @@ class ImageEditorDemo:
|
|
| 91 |
return image
|
| 92 |
|
| 93 |
def to(self, device):
    """Move the editor's pipelines and cached latents to *device*.

    Bug fix: ``torch.Tensor.to`` is NOT in-place — it returns a new tensor —
    so the results must be re-assigned. The original code discarded the
    return values for ``last_latent`` and ``original_latent``, leaving them
    on the old device (keeping GPU memory alive and contributing to OOM).
    Re-assigning the pipelines as well is harmless (pipeline ``.to`` returns
    the pipeline) and keeps the pattern uniform.
    """
    self.pipe_inference = self.pipe_inference.to(device)
    self.pipe_inversion = self.pipe_inversion.to(device)
    self.last_latent = self.last_latent.to(device)
    self.original_latent = self.original_latent.to(device)

    # The custom schedulers hold their own per-step noise tensors; move them too.
    self.pipe_inversion.scheduler.set_noise_list_device(device)
    self.pipe_inference.scheduler.set_noise_list_device(device)
|
|
|
|
| 91 |
return image
|
| 92 |
|
| 93 |
def to(self, device):
    """Relocate every device-resident member of this editor to *device*."""
    # Re-bind each attribute to the result of .to(): tensor .to() returns a
    # new object rather than mutating in place, so assignment is required.
    for attr in ("pipe_inference", "pipe_inversion", "last_latent", "original_latent"):
        setattr(self, attr, getattr(self, attr).to(device))
    # The custom schedulers carry their own noise tensors — move those as well.
    for pipe in (self.pipe_inversion, self.pipe_inference):
        pipe.scheduler.set_noise_list_device(device)
|