app.py CHANGED
@@ -226,6 +226,7 @@ class DDIMSampler(Sampler):
         steps = np.concatenate((steps, steps[-1:]), axis=0)

         x_t = torch.tile(noised_latents, (batch_size, 1, 1, 1)).to(self.device)  # 32, 32
+        print("sample", steps)
         for i in trange(len(steps) - 1):
             x_t = self.sample(model, x_t, steps[i], steps[i + 1], eta)

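The concatenation appends a copy of the final step so that iterating over consecutive pairs `steps[i] -> steps[i + 1]` also performs one last update at the terminal timestep (on the final iteration the two indices are equal); the added `print` just logs the schedule for debugging. A minimal sketch of that pairing pattern, using a hypothetical 10-step schedule (the Space builds its real schedule elsewhere):

import numpy as np

# Hypothetical 10-step schedule; stands in for the sampler's own setup.
steps = np.linspace(999, 0, 10).astype(np.int64)
steps = np.concatenate((steps, steps[-1:]), axis=0)  # repeat last step as a sentinel

for i in range(len(steps) - 1):
    t_cur, t_next = steps[i], steps[i + 1]
    # one denoising update per (t_cur, t_next) pair, e.g.
    # x_t = self.sample(model, x_t, t_cur, t_next, eta)
    print(t_cur, t_next)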
@@ -486,7 +487,7 @@ def init_webui(unet, vae, normal_t):
             looper = sampler.sample_loop(unet, vae.middle_c, batch_size, step_value, shape=img_size, eta=1.)
         else:
             input_image_value = Image.fromarray(input_image_value).resize((img_size[0] * 8, img_size[1] * 8),
-                                                                          Image.
+                                                                          resample=Image.BILINEAR)
             input_image_value = np.array(input_image_value, dtype=np.float32) / 255.
             input_image_value = np.transpose(input_image_value, (2, 0, 1))
             input_image_value = torch.Tensor([input_image_value]).to(device)
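The removed line ended at `Image.`, an incomplete resample argument; the replacement passes `resample=Image.BILINEAR`, a standard Pillow filter. A self-contained sketch of the same preprocessing path, with assumed names (`preprocess`, `latent_size`) since the diff shows only the function body:

import numpy as np
import torch
from PIL import Image

def preprocess(arr: np.ndarray, latent_size=(32, 32), device="cpu") -> torch.Tensor:
    img = Image.fromarray(arr).resize(
        (latent_size[0] * 8, latent_size[1] * 8),  # pixel size is 8x the latent size
        resample=Image.BILINEAR,
    )
    x = np.array(img, dtype=np.float32) / 255.      # HWC uint8 -> float in [0, 1]
    x = np.transpose(x, (2, 0, 1))                  # HWC -> CHW for torch
    return torch.tensor(x).unsqueeze(0).to(device)  # add batch dimension

# x = preprocess(np.zeros((128, 128, 3), dtype=np.uint8))  # -> torch.Size([1, 3, 256, 256])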
@@ -537,7 +538,7 @@ def init_webui(unet, vae, normal_t):
                 inputs=[step_u, batch_size_u, sampler_name_u, img_size_u, ramdom_seed_u],
                 outputs=output_images_u,
                 fn=process_image_u,
-
+                cache_examples=False,
             )
         with gr.Tab(label="image to image"):
             with gr.Column():
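The `inputs=`, `outputs=`, `fn=`, and `cache_examples=` keywords read like the tail of a `gr.Examples(...)` call (the opening of the call is outside the hunk). In Gradio, `cache_examples=False` keeps the app from executing `fn` on every example row at startup to pre-render cached outputs. A minimal, self-contained sketch with a hypothetical stand-in for `process_image_u`:

import gradio as gr

def process_image_u(steps):  # hypothetical stand-in for the Space's sampler call
    return f"would sample for {steps} steps"

with gr.Blocks() as demo:
    step_u = gr.Slider(1, 100, value=50, label="steps")
    out = gr.Textbox(label="output")
    gr.Examples(
        examples=[[20], [50]],
        inputs=[step_u],
        outputs=out,
        fn=process_image_u,
        cache_examples=False,  # don't pre-run fn at launch to cache outputs
    )

demo.launch()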
@@ -565,7 +566,7 @@ def init_webui(unet, vae, normal_t):
                 inputs=[input_image, noise_step, step, batch_size, sampler_name, img_size, ramdom_seed],
                 outputs=output_images,
                 fn=process_image,
-
+                cache_examples=False,
             )

         start_button.click(process_image,
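The second `cache_examples=False` mirrors the first, covering the examples under the image-to-image tab. Left unset, whether examples are pre-executed at launch may depend on the Gradio version and environment defaults (recent versions consult the GRADIO_CACHE_EXAMPLES environment variable), so passing it explicitly avoids a potentially expensive sampling run per example before the UI comes up.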