Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -144,6 +144,54 @@ with gr.Blocks(css=css) as demo:
|
|
| 144 |
uploaded_image = gr.Image(label="Upload Image", type="pil")
|
| 145 |
image_url = gr.Textbox(label="Image URL", placeholder="Enter image URL")
|
| 146 |
use_generated_image = gr.Button("Use Generated Image")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
|
| 148 |
additional_image_output = gr.Image(label="Selected Image", show_label=False)
|
| 149 |
|
|
@@ -163,7 +211,7 @@ with gr.Blocks(css=css) as demo:
|
|
| 163 |
@spaces.GPU
def image2image(uploaded_image, image_url, use_generated=False):
    """Run image-to-image generation on the selected source image.

    Args:
        uploaded_image: source image from the upload widget (PIL, per the
            ``gr.Image(type="pil")`` component that feeds it; may be None).
        image_url: URL string pointing at a source image (may be empty).
        use_generated: presumably selects a previously generated image as
            the source — TODO confirm against ``select_image``.

    Returns:
        The generated image (first image of the pipeline output).
    """
    # Fix 1: the original line was `prompt = "` — an unterminated string
    # literal that made the whole module fail to load (the Space's
    # "Runtime error"). Completed with the prompt used elsewhere in the app.
    # Fix 2: forward the caller's flag instead of hard-coding False,
    # otherwise the `use_generated` parameter is silently ignored.
    image = select_image(uploaded_image, image_url, use_generated=use_generated)
    prompt = "one awesome dude"
    # Fixed seed keeps the img2img output reproducible across calls.
    generator = torch.Generator(device=device).manual_seed(1024)
    image = pipeline2Image(
        prompt=prompt,
        image=image,
        strength=0.75,
        guidance_scale=7.5,
        generator=generator,
    ).images[0]
    return image
|
|
|
|
| 144 |
uploaded_image = gr.Image(label="Upload Image", type="pil")
|
| 145 |
image_url = gr.Textbox(label="Image URL", placeholder="Enter image URL")
|
| 146 |
use_generated_image = gr.Button("Use Generated Image")
|
| 147 |
+
|
| 148 |
+
with gr.Accordion("Advanced Settings", open=False):
|
| 149 |
+
|
| 150 |
+
seed2 = gr.Slider(
|
| 151 |
+
label="Seed",
|
| 152 |
+
minimum=0,
|
| 153 |
+
maximum=MAX_SEED,
|
| 154 |
+
step=1,
|
| 155 |
+
value=0,
|
| 156 |
+
)
|
| 157 |
+
|
| 158 |
+
randomize_seed2 = gr.Checkbox(label="Randomize seed", value=True)
|
| 159 |
+
|
| 160 |
+
with gr.Row():
|
| 161 |
+
|
| 162 |
+
width2 = gr.Slider(
|
| 163 |
+
label="Width",
|
| 164 |
+
minimum=256,
|
| 165 |
+
maximum=MAX_IMAGE_SIZE,
|
| 166 |
+
step=32,
|
| 167 |
+
value=1024,
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
height2 = gr.Slider(
|
| 171 |
+
label="Height",
|
| 172 |
+
minimum=256,
|
| 173 |
+
maximum=MAX_IMAGE_SIZE,
|
| 174 |
+
step=32,
|
| 175 |
+
value=1024,
|
| 176 |
+
)
|
| 177 |
+
|
| 178 |
+
with gr.Row():
|
| 179 |
+
|
| 180 |
+
guidance_scale2 = gr.Slider(
|
| 181 |
+
label="Guidance Scale",
|
| 182 |
+
minimum=1,
|
| 183 |
+
maximum=15,
|
| 184 |
+
step=0.1,
|
| 185 |
+
value=3.5,
|
| 186 |
+
)
|
| 187 |
+
|
| 188 |
+
num_inference_steps2 = gr.Slider(
|
| 189 |
+
label="Number of inference steps",
|
| 190 |
+
minimum=1,
|
| 191 |
+
maximum=50,
|
| 192 |
+
step=1,
|
| 193 |
+
value=28,
|
| 194 |
+
)
|
| 195 |
|
| 196 |
additional_image_output = gr.Image(label="Selected Image", show_label=False)
|
| 197 |
|
|
|
|
| 211 |
@spaces.GPU
def image2image(uploaded_image, image_url, use_generated=False):
    """Run image-to-image generation on the selected source image.

    Args:
        uploaded_image: source image from the upload widget (PIL, per the
            ``gr.Image(type="pil")`` component that feeds it; may be None).
        image_url: URL string pointing at a source image (may be empty).
        use_generated: presumably selects a previously generated image as
            the source — TODO confirm against ``select_image``.

    Returns:
        The generated image (first image of the pipeline output).
    """
    # Bug fix: forward the caller's flag instead of hard-coding False —
    # the original passed `use_generated=False` literally, so the
    # `use_generated` parameter could never have any effect.
    image = select_image(uploaded_image, image_url, use_generated=use_generated)
    prompt = "one awesome dude"
    # Fixed seed keeps the img2img output reproducible across calls.
    generator = torch.Generator(device=device).manual_seed(1024)
    image = pipeline2Image(
        prompt=prompt,
        image=image,
        strength=0.75,
        guidance_scale=7.5,
        generator=generator,
    ).images[0]
    return image
|