Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -23,7 +23,7 @@ def read_content(file_path: str) -> str:
|
|
| 23 |
|
| 24 |
return content
|
| 25 |
|
| 26 |
-
def predict(image, prompt="high quality, best quality", negative_prompt="", guidance_scale=5, steps=30, ip_adapter_scale = 1.0, width=1024, height=1024, seed=0):
|
| 27 |
|
| 28 |
pipe.set_ip_adapter_scale(ip_adapter_scale)
|
| 29 |
|
|
@@ -31,7 +31,8 @@ def predict(image, prompt="high quality, best quality", negative_prompt="", guid
|
|
| 31 |
negative_prompt = None
|
| 32 |
|
| 33 |
init_image = image.convert("RGB")
|
| 34 |
-
|
|
|
|
| 35 |
|
| 36 |
generator = torch.Generator(device="cpu").manual_seed(int(seed))
|
| 37 |
|
|
@@ -120,6 +121,7 @@ with image_blocks as demo:
|
|
| 120 |
steps = gr.Number(value=30, minimum=10, maximum=100, step=1, label="steps")
|
| 121 |
seed = gr.Number(value=0, minimum=0, maximum=100000, step=1, label="seed")
|
| 122 |
negative_prompt = gr.Textbox(label="negative_prompt", value=default_negative_prompt, placeholder=default_negative_prompt, info="what you don't want to see in the image")
|
|
|
|
| 123 |
|
| 124 |
|
| 125 |
with gr.Column():
|
|
@@ -127,8 +129,8 @@ with image_blocks as demo:
|
|
| 127 |
|
| 128 |
|
| 129 |
|
| 130 |
-
btn.click(fn=predict, inputs=[image, prompt, negative_prompt, guidance_scale, steps, ip_adapter_scale, width, height, seed], outputs=[image_out], api_name='run')
|
| 131 |
-
prompt.submit(fn=predict, inputs=[image, prompt, negative_prompt, guidance_scale, steps, ip_adapter_scale, width, height, seed], outputs=[image_out])
|
| 132 |
|
| 133 |
# gr.Examples(
|
| 134 |
# examples=[
|
|
|
|
| 23 |
|
| 24 |
return content
|
| 25 |
|
| 26 |
+
def predict(image, prompt="high quality, best quality", negative_prompt="", guidance_scale=5, steps=30, ip_adapter_scale = 1.0, width=1024, height=1024, seed=0, center_crop=False):
|
| 27 |
|
| 28 |
pipe.set_ip_adapter_scale(ip_adapter_scale)
|
| 29 |
|
|
|
|
| 31 |
negative_prompt = None
|
| 32 |
|
| 33 |
init_image = image.convert("RGB")
|
| 34 |
+
if center_crop is False:
|
| 35 |
+
init_image = init_image.resize((224, 224))
|
| 36 |
|
| 37 |
generator = torch.Generator(device="cpu").manual_seed(int(seed))
|
| 38 |
|
|
|
|
| 121 |
steps = gr.Number(value=30, minimum=10, maximum=100, step=1, label="steps")
|
| 122 |
seed = gr.Number(value=0, minimum=0, maximum=100000, step=1, label="seed")
|
| 123 |
negative_prompt = gr.Textbox(label="negative_prompt", value=default_negative_prompt, placeholder=default_negative_prompt, info="what you don't want to see in the image")
|
| 124 |
+
center_crop = gr.Checkbox(label="Center Crop", info="If not checked, the image would be resized to square before it's fed to the model.")
|
| 125 |
|
| 126 |
|
| 127 |
with gr.Column():
|
|
|
|
| 129 |
|
| 130 |
|
| 131 |
|
| 132 |
+
btn.click(fn=predict, inputs=[image, prompt, negative_prompt, guidance_scale, steps, ip_adapter_scale, width, height, seed, center_crop], outputs=[image_out], api_name='run')
|
| 133 |
+
prompt.submit(fn=predict, inputs=[image, prompt, negative_prompt, guidance_scale, steps, ip_adapter_scale, width, height, seed, center_crop], outputs=[image_out])
|
| 134 |
|
| 135 |
# gr.Examples(
|
| 136 |
# examples=[
|