Spaces:
Running
on
A10G
Running
on
A10G
Commit
·
8c6eab4
1
Parent(s):
a9765cd
Update due to Gradio bug
Browse files
app.py
CHANGED
|
@@ -46,7 +46,7 @@ model = load_model_from_config(config, f"txt2img-f8-large.ckpt")
|
|
| 46 |
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
| 47 |
model = model.to(device)
|
| 48 |
|
| 49 |
-
def run(prompt, steps, width, height, images, scale, eta):
|
| 50 |
if images == 6:
|
| 51 |
images = 3
|
| 52 |
n_iter = 2
|
|
@@ -56,7 +56,7 @@ def run(prompt, steps, width, height, images, scale, eta):
|
|
| 56 |
prompt = prompt,
|
| 57 |
outdir='latent-diffusion/outputs',
|
| 58 |
ddim_steps = int(steps),
|
| 59 |
-
ddim_eta = eta,
|
| 60 |
n_iter = n_iter,
|
| 61 |
W=int(width),
|
| 62 |
H=int(height),
|
|
@@ -126,12 +126,11 @@ css = ".output-image{height: 528px !important} .output-carousel .output-image{he
|
|
| 126 |
iface = gr.Interface(fn=run, inputs=[
|
| 127 |
gr.inputs.Textbox(label="Prompt",default="A drawing of a cute dog with a funny hat"),
|
| 128 |
gr.inputs.Slider(label="Steps - more steps can increase quality but will take longer to generate",default=50,maximum=250,minimum=1,step=1),
|
| 129 |
-
gr.inputs.
|
| 130 |
-
gr.inputs.
|
| 131 |
gr.inputs.Slider(label="Images - How many images you wish to generate", default=4, step=2, minimum=2, maximum=6),
|
| 132 |
-
gr.inputs.Slider(label="Diversity scale - How different from one another you wish the images to be",default=5.0, minimum=1),
|
| 133 |
-
gr.inputs.Slider(label="ETA - between 0 and 1. Lower values can provide better quality, higher values can be more diverse",default=0.0,minimum=0.0, maximum=1.0,step=0.1),
|
| 134 |
-
|
| 135 |
],
|
| 136 |
outputs=[image,gr.outputs.Carousel(label="Individual images",components=["image"])],
|
| 137 |
css=css,
|
|
|
|
| 46 |
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
| 47 |
model = model.to(device)
|
| 48 |
|
| 49 |
+
def run(prompt, steps, width, height, images, scale):
|
| 50 |
if images == 6:
|
| 51 |
images = 3
|
| 52 |
n_iter = 2
|
|
|
|
| 56 |
prompt = prompt,
|
| 57 |
outdir='latent-diffusion/outputs',
|
| 58 |
ddim_steps = int(steps),
|
| 59 |
+
ddim_eta = 0,
|
| 60 |
n_iter = n_iter,
|
| 61 |
W=int(width),
|
| 62 |
H=int(height),
|
|
|
|
| 126 |
iface = gr.Interface(fn=run, inputs=[
|
| 127 |
gr.inputs.Textbox(label="Prompt",default="A drawing of a cute dog with a funny hat"),
|
| 128 |
gr.inputs.Slider(label="Steps - more steps can increase quality but will take longer to generate",default=50,maximum=250,minimum=1,step=1),
|
| 129 |
+
gr.inputs.Radio(label="Width", choices=[32,64,128,256,384],default=256),
|
| 130 |
+
gr.inputs.Radio(label="Height", choices=[32,64,128,256,384],default=256),
|
| 131 |
gr.inputs.Slider(label="Images - How many images you wish to generate", default=4, step=2, minimum=2, maximum=6),
|
| 132 |
+
gr.inputs.Slider(label="Diversity scale - How different from one another you wish the images to be",default=5.0, minimum=1.0, maximum=50),
|
| 133 |
+
#gr.inputs.Slider(label="ETA - between 0 and 1. Lower values can provide better quality, higher values can be more diverse",default=0.0,minimum=0.0, maximum=1.0,step=0.1),
|
|
|
|
| 134 |
],
|
| 135 |
outputs=[image,gr.outputs.Carousel(label="Individual images",components=["image"])],
|
| 136 |
css=css,
|