Spaces:
Running on T4
Running on T4
Update app.py
Browse files — Fixed Bells and Whistles by adding Pulleys and Levers
app.py
CHANGED
|
@@ -12,12 +12,12 @@ upscaler = upscaler.to(device)
|
|
| 12 |
pipe = pipe.to(device)
|
| 13 |
|
| 14 |
def genie (Prompt, negative_prompt, height, width, scale, steps, seed, upscale, upscale_prompt, upscale_neg, upscale_scale, upscale_steps):
|
| 15 |
-
generator = torch.Generator(device=device).manual_seed(
|
| 16 |
if upscale == "Yes":
|
| 17 |
-
low_res_latents = pipe(Prompt, negative_prompt=negative_prompt, num_inference_steps=steps, guidance_scale=scale, generator=generator, output_type="latent").images
|
| 18 |
-
image = upscaler(prompt=
|
| 19 |
else:
|
| 20 |
-
image = pipe(Prompt, negative_prompt=negative_prompt, num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
|
| 21 |
return image
|
| 22 |
|
| 23 |
gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
|
|
|
|
| 12 |
pipe = pipe.to(device)
|
| 13 |
|
| 14 |
def genie (Prompt, negative_prompt, height, width, scale, steps, seed, upscale, upscale_prompt, upscale_neg, upscale_scale, upscale_steps):
    """Generate an image from a text prompt, optionally running a second upscaling pass.

    Uses the module-level `pipe` (base diffusion pipeline) and, when requested,
    `upscaler` (latent upscale pipeline); both are assumed to be loaded elsewhere
    in this file — TODO confirm.

    Args:
        Prompt: text prompt for the base pipeline (77-token limit per the UI label).
        negative_prompt: text the base pipeline should steer away from.
        height, width: output dimensions passed to the base pipeline.
        scale: guidance scale for the base pass.
        steps: number of inference steps for the base pass.
        seed: integer seed for the torch.Generator (deterministic output).
        upscale: the literal string "Yes" enables the upscaling branch;
            any other value skips it.
        upscale_prompt, upscale_neg: prompts for the upscaler pass.
        upscale_scale, upscale_steps: guidance scale / step count for the upscaler.

    Returns:
        A single PIL image (first element of the pipeline's `.images`).
    """
    # Seed once so both passes (base + upscale) share the same generator state.
    rng = torch.Generator(device=device).manual_seed(seed)

    base_kwargs = dict(
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_inference_steps=steps,
        guidance_scale=scale,
        generator=rng,
    )

    if upscale != "Yes":
        # Single-pass path: decode straight to an image.
        return pipe(Prompt, **base_kwargs).images[0]

    # Two-pass path: keep the base output in latent space and feed it
    # to the upscaler pipeline, which decodes the final image.
    latents = pipe(Prompt, output_type="latent", **base_kwargs).images
    upscaled = upscaler(
        prompt=upscale_prompt,
        negative_prompt=upscale_neg,
        image=latents,
        num_inference_steps=upscale_steps,
        guidance_scale=upscale_scale,
        generator=rng,
    )
    return upscaled.images[0]
|
| 22 |
|
| 23 |
gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
|