Spaces · Runtime error
Matheus Ribeiro de Oliveira committed · Commit 4f9dece · 1 Parent(s): 9be17fb
start app
README.md CHANGED
@@ -1,8 +1,8 @@
 ---
 title: Image Editing
-emoji:
-colorFrom:
-colorTo:
+emoji: 🌖
+colorFrom: green
+colorTo: red
 sdk: gradio
 sdk_version: 4.39.0
 app_file: app.py
@@ -10,3 +10,5 @@ pinned: false
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+To run this app on GPU, please follow the instructions on the repo: https://github.com/matt576/image-editing
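A side note on the configuration above (a sketch, not part of this commit): the front matter pins sdk: gradio at sdk_version: 4.39.0 and names app_file: app.py as the entrypoint, so a quick local pre-flight check before running python app.py could look like this.

# Illustrative pre-flight check for running the Space locally; assumes the
# gradio package is installed and that the pin below matches the README.
import importlib.metadata

required = "4.39.0"  # sdk_version declared in the front matter above
installed = importlib.metadata.version("gradio")
if installed != required:
    print(f"Warning: Space pins gradio=={required}, but {installed} is installed.")
# The declared entrypoint is app_file: app.py, so run locally with: python app.py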
afm_gradio.py ADDED
@@ -0,0 +1,408 @@
import gradio as gr
from PIL import Image, ImageDraw


# Dispatch the selected task to its module. Imports are deferred into each
# branch so that only the chosen model's dependencies are loaded.
def run_afm_app(task_selector, input_image, mask_image, text_input, text_input_x, text_input_gsam, coord_input,
                ddim_steps, ddim_steps_pipe, inpaint_input_gsam, text_input_inpaint_pipe, text_input_restyling,
                blur, sharpen, prompt_outpaint, e_l, e_r, e_u, e_d, steps_outpaint, prompt_background, steps_br,
                str_res, gs_res, np_res, steps_res, np_inpaint, steps_inpaint, prompt_txt2img, np_txt2img, gs_txt2img,
                steps_txt2img, steps_super, dilation_bool, dilation_value, steps_inp):

    print(f"Task selected: {task_selector}")

    if task_selector == "SAM":
        from mask_sam import sam_gradio
        return sam_gradio(input_image, coord_input, dilation_bool, dilation_value)

    if task_selector == "GroundedSAM":
        from mask_groundedsam import groundedsam_mask_gradio
        return groundedsam_mask_gradio(input_image, text_input, dilation_bool, dilation_value)

    if task_selector == "Stable Diffusion with ControlNet Inpainting":
        from inpaint_sd_controlnet import controlnet_inpaint_gradio
        return controlnet_inpaint_gradio(input_image, mask_image, text_input_x)

    if task_selector == "Stable Diffusion v1.5 Inpainting":
        from inpaint_sd import inpaint_sd_gradio
        return inpaint_sd_gradio(input_image, mask_image, text_input_x, steps_inp)

    if task_selector == "Stable Diffusion XL Inpainting":
        from inpaint_sdxl import inpaint_sdxl_gradio
        return inpaint_sdxl_gradio(input_image, mask_image, text_input_x, steps_inp)

    if task_selector == "Kandinsky v2.2 Inpainting":
        from inpaint_kandinsky import inpaint_kandinsky_gradio
        return inpaint_kandinsky_gradio(input_image, mask_image, text_input_x, steps_inp)

    if task_selector == "GroundedSAM Inpainting":
        from inpaint_groundedsam import groundedsam_inpaint_gradio
        return groundedsam_inpaint_gradio(input_image, text_input_gsam, inpaint_input_gsam)

    if task_selector == "Object Removal LDM":
        from eraser_ldm import ldm_removal_gradio
        return ldm_removal_gradio(input_image, mask_image, ddim_steps)

    if task_selector == "Restyling - Stable Diffusion v1.5":
        from restyling_sd import restyling_gradio
        return restyling_gradio(input_image, text_input_restyling, str_res, gs_res, np_res, steps_res)

    if task_selector == "Restyling - Stable Diffusion XL":
        from restyling_sdxl import restyling_sdxl_gradio
        return restyling_sdxl_gradio(input_image, text_input_restyling, str_res, gs_res, np_res, steps_res)

    if task_selector == "Restyling - Kandinsky v2.2":
        from restyling_kandinsky import restyling_kandinsky_gradio
        return restyling_kandinsky_gradio(input_image, text_input_restyling, str_res, gs_res, np_res, steps_res)

    if task_selector == "Superresolution - LDM x4 OpenImages":
        from superres_ldm import superres_gradio
        return superres_gradio(input_image, steps_super)

    if task_selector == "Superresolution - Stability AI x4 Upscaler":
        from superres_upscaler import superres_upscaler_gradio
        return superres_upscaler_gradio(input_image, steps_super)

    if task_selector == "LDM Removal Pipeline":
        from eraser_ldm_pipe import ldm_removal_pipe_gradio
        return ldm_removal_pipe_gradio(input_image, coord_input, ddim_steps_pipe)

    if task_selector in ["Stable Diffusion v1.5 Inpainting Pipeline", "Stable Diffusion XL Inpainting Pipeline", "Kandinsky v2.2 Inpainting Pipeline"]:
        from inpaint_pipe import inpaint_pipe_gradio
        return inpaint_pipe_gradio(task_selector, input_image, coord_input, text_input_inpaint_pipe, np_inpaint, steps_inpaint)

    if task_selector == "Stable Diffusion with ControlNet Inpainting Pipeline":
        from inpaint_sd_controlnet_pipe import inpaint_func_pipe_gradio
        return inpaint_func_pipe_gradio(input_image, coord_input, text_input_inpaint_pipe, np_inpaint, steps_inpaint)

    if task_selector == "Portrait Mode - Depth Anything":
        from blur_image import portrait_gradio
        return portrait_gradio(input_image, blur, sharpen)

    if task_selector == "Outpainting - Stable Diffusion":
        from outpaint_sd import outpaint_sd_gradio
        return outpaint_sd_gradio(input_image, prompt_outpaint, e_l, e_r, e_u, e_d, steps_outpaint)

    if task_selector == "Outpainting - Stable Diffusion XL":
        from outpaint_sdxl import outpaint_sdxl_gradio
        return outpaint_sdxl_gradio(input_image, prompt_outpaint, e_l, e_r, e_u, e_d, steps_outpaint)

    if task_selector == "Background Replacement - Stable Diffusion":
        from background_replace_sd import background_replace_sd_gradio
        return background_replace_sd_gradio(input_image, prompt_background, steps_br)

    if task_selector == "Background Replacement - Stable Diffusion XL":
        from background_replace_sdxl import background_replace_sdxl_gradio
        return background_replace_sdxl_gradio(input_image, prompt_background, steps_br)

    if task_selector in ["Stable Diffusion v1.5 Txt2Img", "Stable Diffusion XL Txt2Img", "Kandinsky v2.2 Txt2Img"]:
        from txt2img_generation import txt2img_gradio
        return txt2img_gradio(input_image, task_selector, prompt_txt2img, np_txt2img, gs_txt2img, steps_txt2img)

    if task_selector == "Eraser - LaMa":
        from eraser_lama import eraser_lama_gradio
        return eraser_lama_gradio(input_image, mask_image)


selected_points = []


# Collect clicked points, echo them as an "x1,y1; x2,y2" string, and draw
# red markers on a copy of the input image.
def input_handler(evt: gr.SelectData, input_image):
    global selected_points
    coords = evt.index
    x, y = coords[0], coords[1]
    selected_points.append([x, y])
    coord_string = '; '.join([f"{pt[0]},{pt[1]}" for pt in selected_points])

    image_with_points = input_image.copy()
    draw = ImageDraw.Draw(image_with_points)
    for point in selected_points:
        draw.ellipse((point[0] - 2, point[1] - 2, point[0] + 2, point[1] + 2), fill="red", outline="red")

    return coord_string, image_with_points


def reset_selected_points(input_image):
    global selected_points
    selected_points = []
    print("Selected points have been reset.")
    return "", input_image


def reload_image(original_image_path):
    return original_image_path


def update_task_selector(task_selector, task):
    # The first argument is the current state; it is intentionally ignored.
    return task


def reload_image_with_output(output_image):
    return output_image


def reload_mask(output_image):
    return output_image


title = "# AFM Image-Editing App"

if __name__ == "__main__":
    block = gr.Blocks(theme='shivi/calm_seafoam')

    with block:
        gr.Markdown(title)
        gr.Markdown(
            """
            Welcome to the AFM Image-Editing App!
            First, upload an input image or generate it via Txt2Img below.
            Then, choose the desired task by navigating the tabs.
            Finally, choose the model from the dropdown within each tab and click 'Generate'. Enjoy the app!
            """)

        original_image_path = "inputs/demo/milton.png"  # Select input image path here
        # original_image_path = "outputs/txt2img/generated_input.png"  # for a txt2img-generated input image
        input_mask_path = "inputs/gradio_masks/jessi_mask.png"  # Optional; make sure it matches the input image
        original_image = Image.open(original_image_path)

        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Input Image", sources='upload', type="pil", value=original_image_path, interactive=True)
            with gr.Column():
                output_image = gr.Image(label="Generated Image", type="pil")

        with gr.Row():
            generate_button = gr.Button("Generate!")

        with gr.Row():
            with gr.Column():
                gr.Markdown("Type image coordinates manually or click on the image directly:")
                coord_input = gr.Textbox(label="Pixel Coordinates (x,y), Format x1,y1; x2,y2 ...", value="")
                reset_button = gr.Button("Reset coordinates")
                reload_image_button = gr.Button("Clear Image")
                reload_output_button = gr.Button("Load Output")
                task_selector = gr.State(value="")

                with gr.Accordion("Txt2Img Generation (Optional)", open=False):
                    tab_task_selector_11 = gr.Dropdown(["Stable Diffusion v1.5 Txt2Img",
                                                        "Stable Diffusion XL Txt2Img",
                                                        "Kandinsky v2.2 Txt2Img"], label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    Use this feature if you wish to generate your own input image.
                    After generation, simply uncomment the original_image_path line in the Gradio script and relaunch the app!
                    Required Inputs: Text Prompt, Negative Prompt, Guidance Scale, Number of Inference Steps
                    Example prompt: "Photorealistic Gotham City night skyline, rain pouring down, dark clouds with streaks of lightning."
                    Example negative prompt: "poor details, poor quality, blurry, deformed, extra limbs"
                    """)
                    prompt_txt2img = gr.Textbox(label="Text Prompt: ", value="Photorealistic Gotham City night skyline, Batman standing on top of skyscraper, close shot, unreal engine, cinematic, rain pouring down, dark clouds with streaks of lightning")
                    np_txt2img = gr.Textbox(label="Negative Prompt", value="poor details, poor quality, blurry, deformed, extra limbs")
                    gs_txt2img = gr.Slider(minimum=0.0, maximum=50.0, label="Guidance Scale", value=7.5)
                    steps_txt2img = gr.Slider(minimum=5, maximum=200, label="Number of inference steps", value=30, step=1)

                with gr.Accordion("Mask Input Tasks (Optional)", open=False):
                    gr.Markdown("""
                    Here is the mask uploaded directly from the Gradio script. If you wish to change it,
                    use the Mask Generation Preview tab and click the 'Load Preview Mask' button.
                    """)
                    mask_image = gr.Image(label="Input Mask (Optional)", sources='upload', type="pil", value=input_mask_path)

                with gr.Tab("Inpainting - Object Replacement"):
                    tab_task_selector_2 = gr.Dropdown(["Stable Diffusion with ControlNet Inpainting",
                                                       "Stable Diffusion v1.5 Inpainting",
                                                       "Stable Diffusion XL Inpainting",
                                                       "Kandinsky v2.2 Inpainting"],
                                                      label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    All models in this section work with the given uploaded input mask.
                    Required Inputs: Input Mask (upload), Text Prompt (object to replace the masked area).
                    Enter in the text box below the desired object to be inpainted in place of the masked area.
                    Example prompt: "astronaut, white suit, 8k, extremely detailed, ornate, cinematic lighting, vivid, photorealistic, detailed, high quality"
                    """)
                    text_input_x = gr.Textbox(label="Text Prompt: ", value="astronaut, white suit, 8k, extremely detailed, ornate, cinematic lighting, vivid, photorealistic, detailed, high quality")
                    steps_inp = gr.Slider(minimum=5, maximum=200, label="Number of inference steps: ", value=50, step=1)

                with gr.Tab("Object Removal"):
                    tab_task_selector_3 = gr.Dropdown(["Object Removal LDM", "Eraser - LaMa"], label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    - **Object Removal LDM**:
                    Required inputs: Input Image, Input Mask (Upload or from Preview), DDIM Steps
                    Given the uploaded mask below, simply adjust the slider below according to the desired number of iterations.
                    - **Eraser - LaMa**:
                    Required inputs: Input Image, Input Mask (Upload or from Preview)
                    Please note: due to compatibility issues between the LaMa model and our Gradio app, the output visualization will not
                    work in the app, but your output will be saved to: code/outputs/untracked/eraser-lama.
                    """)
                    ddim_steps = gr.Slider(minimum=5, maximum=250, label="Number of DDIM sampling steps for object removal LDM", value=150, step=1)

            with gr.Column():
                with gr.Tab("Mask Generation Preview"):
                    tab_task_selector_1 = gr.Dropdown(["SAM", "GroundedSAM"], label="Select Model")
                    reload_mask_button = gr.Button("Load Preview Mask")
                    gr.Markdown("""
                    ### Instructions
                    - **SAM**:
                    Required inputs: Input Image, Pixel Coordinates, (Optional) Dilation
                    Type image coordinates manually or click on the image directly. Finally, simply click on the 'Generate' button.
                    """)
                    dilation_bool = gr.Dropdown(["Yes", "No"], label="Use dilation (recommended for inpainting)")
                    dilation_value = gr.Slider(minimum=0, maximum=50, label="Dilation value (recommended: 10) ", value=10, step=1)
                    gr.Markdown("""
                    - **GroundedSAM (GroundingDINO + SAM)**:
                    Required Inputs: Text Prompt [object(s) to be detected], (Optional) Dilation
                    Enter in the text box below the object(s) in the input image for which the masks are to be generated.
                    """)
                    text_input = gr.Textbox(label="Text Prompt: ", value="dog")

                with gr.Tab("Restyling"):
                    tab_task_selector_4 = gr.Dropdown(["Restyling - Stable Diffusion v1.5",
                                                       "Restyling - Stable Diffusion XL",
                                                       "Restyling - Kandinsky v2.2"], label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    Required Inputs: Input Image, Text Prompt, Strength, Guidance Scale, Negative Prompt, Number of Inference Steps
                    Example Text Prompt: "Photorealistic Gotham City night skyline, rain pouring down, dark clouds with streaks of lightning."
                    Example Negative Prompt: "poor details, poor quality, blurry, deformed, extra limbs"
                    """)
                    text_input_restyling = gr.Textbox(label="Text Prompt: ", value="Futuristic night city from Cyberpunk 2077, rainy night, close shot, 35 mm, realism, octane render, 8 k, exploration, cinematic, pixbay, modernist, realistic, unreal engine, hyper detailed, photorealistic, maximum detail, volumetric light, moody cinematic epic concept art, vivid")
                    str_res = gr.Slider(minimum=0.1, maximum=1.0, label="Strength: ", value=0.75, step=0.01)
                    gs_res = gr.Slider(minimum=0.0, maximum=50.0, label="Guidance Scale: ", value=7.5, step=0.1)
                    np_res = gr.Textbox(label="Negative Prompt: ", value="poor details, poor quality, blurry, deformed, extra limbs")
                    steps_res = gr.Slider(minimum=5, maximum=150, label="Number of inference steps: ", value=30, step=1)

                with gr.Tab("Superresolution"):
                    tab_task_selector_5 = gr.Dropdown(["Superresolution - LDM x4 OpenImages",
                                                       "Superresolution - Stability AI x4 Upscaler"], label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    Required Inputs: Input Image, Number of Inference Steps
                    Select the model from the dropdown menu, set the number of inference steps, and click the 'Generate' button to get your new image.
                    """)
                    steps_super = gr.Slider(minimum=5, maximum=150, label="Number of inference steps: ", value=30, step=1)

                with gr.Tab("Pipeline: Inpainting - Object Replacement"):
                    tab_task_selector_6 = gr.Dropdown(["GroundedSAM Inpainting",
                                                       "Stable Diffusion with ControlNet Inpainting Pipeline",
                                                       "Stable Diffusion v1.5 Inpainting Pipeline",
                                                       "Stable Diffusion XL Inpainting Pipeline",
                                                       "Kandinsky v2.2 Inpainting Pipeline"], label="Select Model")
                    gr.Markdown("""
                    - **GroundedSAM Inpainting (GroundingDINO + SAM + Stable Diffusion)**:
                    Required Inputs: Input Image, Detection Prompt, Inpainting Prompt
                    Enter in the text box below the object(s) in the input image for which the masks are to be generated.
                    Example detection prompt: "dog"
                    Example inpaint prompt: "white tiger, photorealistic, detailed, high quality"
                    """)
                    text_input_gsam = gr.Textbox(label="Detection Prompt: ", value="dog")
                    inpaint_input_gsam = gr.Textbox(label="Inpainting Prompt: ", value="astronaut, white suit, 8k, extremely detailed, ornate, cinematic lighting, vivid, photorealistic, detailed, high quality")
                    gr.Markdown("""
                    - **Kandinsky v2.2 / Stable Diffusion v1.5 / SDXL / SD + ControlNet**:
                    Required Inputs: Input Image, Pixel Coordinates, Inpainting Prompt
                    Enter in the text box below the object(s) in the input image for which the masks are to be generated.
                    Example Text Prompt: "white tiger, photorealistic, detailed, high quality"
                    Example Negative Prompt: "poor details, poor quality, blurry, deformed, extra limbs"
                    """)
                    text_input_inpaint_pipe = gr.Textbox(label="Text Prompt: ", value="astronaut, white suit, 8k, extremely detailed, ornate, cinematic lighting, vivid, photorealistic, detailed, high quality")
                    np_inpaint = gr.Textbox(label="Negative Prompt: ", value="poor details, poor quality, blurry, deformed, extra limbs")
                    steps_inpaint = gr.Slider(minimum=5, maximum=200, label="Number of inference steps: ", value=150, step=1)

                with gr.Tab("Pipeline - Object Removal"):
                    tab_task_selector_7 = gr.Dropdown(["LDM Removal Pipeline", " "], label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    - **LDM Removal Pipeline**:
                    Required inputs: Input Image, Pixel Coordinates, DDIM Steps
                    If you wish to view the mask before the final output, go to the 'Mask Generation Preview' tab.
                    Type the image coordinates manually in the box under the image or click on the image directly.
                    For a more detailed mask of a specific object or part of it, select multiple points.
                    Finally, choose the number of DDIM steps and simply click on the 'Generate' button.
                    """)
                    ddim_steps_pipe = gr.Slider(minimum=5, maximum=250, label="Number of DDIM sampling steps for object removal", value=150, step=1)

                with gr.Tab("Background Blurring"):
                    tab_task_selector_8 = gr.Dropdown(["Portrait Mode - Depth Anything"], label='Select Model')
                    gr.Markdown("""
                    ### Instructions
                    - **Portrait Mode - Depth Anything**:
                    Required inputs: Input Image, Box Blur, Sharpen
                    Recommended blur value range: 2-25
                    Recommended sharpen value range: 0-5
                    Adjust the required inputs with the sliders below:
                    """)
                    blur = gr.Slider(minimum=0, maximum=50, label="Box Blur value", value=5, step=1)
                    sharpen = gr.Slider(minimum=0, maximum=7, label="Sharpen Parameter", value=0, step=1)

                with gr.Tab("Outpainting"):
                    tab_task_selector_9 = gr.Dropdown(["Outpainting - Stable Diffusion", "Outpainting - Stable Diffusion XL"], label='Select Model')
                    gr.Markdown("""
                    ### Instructions
                    - **Outpainting - Stable Diffusion**:
                    Required inputs: Input Image, Text Prompt, extend left/right/up/down, steps
                    Choose how much and in which direction you want to extend/outpaint your image, and specify a text prompt.
                    Example prompt: "open plan, kitchen and living room, black umbrella on the floor, modular furniture with cotton textiles, wooden floor, high ceiling, large steel windows viewing a city"
                    """)
                    prompt_outpaint = gr.Textbox(label="Text Prompt: ", value="open plan, kitchen and living room, black umbrella on the floor, modular furniture with cotton textiles, wooden floor, high ceiling, large steel windows viewing a city")
                    e_l = gr.Slider(minimum=0, maximum=1000, label="Extend Left", value=200, step=1)
                    e_r = gr.Slider(minimum=0, maximum=1000, label="Extend Right", value=200, step=1)
                    e_u = gr.Slider(minimum=0, maximum=1000, label="Extend Up", value=200, step=1)
                    e_d = gr.Slider(minimum=0, maximum=1000, label="Extend Down", value=200, step=1)
                    steps_outpaint = gr.Slider(minimum=0, maximum=200, label="Number of Steps", value=50, step=1)

                with gr.Tab("Background Replacement"):
                    tab_task_selector_10 = gr.Dropdown(["Background Replacement - Stable Diffusion", "Background Replacement - Stable Diffusion XL"], label='Select Model')
                    gr.Markdown("""
                    ### Instructions
                    - **Background Replacement - Stable Diffusion**:
                    Required inputs: Input Image, Text Prompt, steps
                    Specify the new background in the text box below.
                    Example prompt: "dog sitting on the beach, sunny day, blue sky"
                    """)
                    prompt_background = gr.Textbox(label="Text Prompt: ", value="dog sitting on the beach, sunny day, blue sky, cinematic, pixbay, modernist, realistic, unreal engine, hyper detailed, photorealistic, maximum detail, volumetric light, moody cinematic epic concept art, vivid")
                    steps_br = gr.Slider(minimum=0, maximum=200, label="Number of Steps", value=30, step=1)

        input_image.select(input_handler, inputs=[input_image], outputs=[coord_input, input_image])

        generate_button.click(
            fn=run_afm_app,
            inputs=[task_selector, input_image, mask_image, text_input, text_input_x, text_input_gsam, coord_input, ddim_steps, ddim_steps_pipe,
                    inpaint_input_gsam, text_input_inpaint_pipe, text_input_restyling, blur, sharpen, prompt_outpaint, e_l, e_r, e_u, e_d, steps_outpaint,
                    prompt_background, steps_br, str_res, gs_res, np_res, steps_res, np_inpaint, steps_inpaint, prompt_txt2img, np_txt2img, gs_txt2img,
                    steps_txt2img, steps_super, dilation_bool, dilation_value, steps_inp],
            outputs=output_image
        )

        reset_button.click(
            fn=reset_selected_points,
            inputs=[input_image],
            outputs=[coord_input, input_image]
        )

        reload_image_button.click(
            fn=reload_image,
            inputs=[gr.State(original_image_path)],
            outputs=[input_image]
        )

        reload_output_button.click(
            fn=reload_image_with_output,
            inputs=[output_image],
            outputs=[input_image]
        )

        reload_mask_button.click(
            fn=reload_mask,
            inputs=[output_image],
            outputs=[mask_image]
        )

        tab_task_selector_1.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_1], outputs=[task_selector])
        tab_task_selector_2.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_2], outputs=[task_selector])
        tab_task_selector_3.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_3], outputs=[task_selector])
        tab_task_selector_4.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_4], outputs=[task_selector])
        tab_task_selector_5.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_5], outputs=[task_selector])
        tab_task_selector_6.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_6], outputs=[task_selector])
        tab_task_selector_7.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_7], outputs=[task_selector])
        tab_task_selector_8.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_8], outputs=[task_selector])
        tab_task_selector_9.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_9], outputs=[task_selector])
        tab_task_selector_10.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_10], outputs=[task_selector])
        tab_task_selector_11.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_11], outputs=[task_selector])

    block.launch(share=True)
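A note on the coordinate handoff above (illustrative, not part of this commit): input_handler serializes clicks into the "x1,y1; x2,y2" string held by coord_input, and the task modules imported in run_afm_app (mask_sam, eraser_ldm_pipe, inpaint_pipe, ...) receive that string as-is. Those modules are not shown here, so the helper below is only a hypothetical sketch of how such a string could be parsed back into integer points.

# Hypothetical helper (not in this commit): recover integer points from the
# "x1,y1; x2,y2" string that input_handler builds for coord_input.
def parse_coord_string(coord_string: str) -> list[tuple[int, int]]:
    points = []
    for pair in coord_string.split(";"):
        pair = pair.strip()
        if not pair:
            continue  # ignore empty segments, e.g. a trailing ";"
        x_str, y_str = pair.split(",")
        points.append((int(x_str), int(y_str)))
    return points


print(parse_coord_string("120,45; 300,210"))  # [(120, 45), (300, 210)]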
app.py ADDED
@@ -0,0 +1,409 @@
import gradio as gr
from PIL import Image, ImageDraw


# Dispatch the selected task to its module. Imports are deferred into each
# branch so that only the chosen model's dependencies are loaded.
def run_afm_app(task_selector, input_image, mask_image, text_input, text_input_x, text_input_gsam, coord_input,
                ddim_steps, ddim_steps_pipe, inpaint_input_gsam, text_input_inpaint_pipe, text_input_restyling,
                blur, sharpen, prompt_outpaint, e_l, e_r, e_u, e_d, steps_outpaint, prompt_background, steps_br,
                str_res, gs_res, np_res, steps_res, np_inpaint, steps_inpaint, prompt_txt2img, np_txt2img, gs_txt2img,
                steps_txt2img, steps_super, dilation_bool, dilation_value, steps_inp):

    print(f"Task selected: {task_selector}")

    if task_selector == "SAM":
        from mask_sam import sam_gradio
        return sam_gradio(input_image, coord_input, dilation_bool, dilation_value)

    if task_selector == "GroundedSAM":
        from mask_groundedsam import groundedsam_mask_gradio
        return groundedsam_mask_gradio(input_image, text_input, dilation_bool, dilation_value)

    if task_selector == "Stable Diffusion with ControlNet Inpainting":
        from inpaint_sd_controlnet import controlnet_inpaint_gradio
        return controlnet_inpaint_gradio(input_image, mask_image, text_input_x)

    if task_selector == "Stable Diffusion v1.5 Inpainting":
        from inpaint_sd import inpaint_sd_gradio
        return inpaint_sd_gradio(input_image, mask_image, text_input_x, steps_inp)

    if task_selector == "Stable Diffusion XL Inpainting":
        from inpaint_sdxl import inpaint_sdxl_gradio
        return inpaint_sdxl_gradio(input_image, mask_image, text_input_x, steps_inp)

    if task_selector == "Kandinsky v2.2 Inpainting":
        from inpaint_kandinsky import inpaint_kandinsky_gradio
        return inpaint_kandinsky_gradio(input_image, mask_image, text_input_x, steps_inp)

    if task_selector == "GroundedSAM Inpainting":
        from inpaint_groundedsam import groundedsam_inpaint_gradio
        return groundedsam_inpaint_gradio(input_image, text_input_gsam, inpaint_input_gsam)

    if task_selector == "Object Removal LDM":
        from eraser_ldm import ldm_removal_gradio
        return ldm_removal_gradio(input_image, mask_image, ddim_steps)

    if task_selector == "Restyling - Stable Diffusion v1.5":
        from restyling_sd import restyling_gradio
        return restyling_gradio(input_image, text_input_restyling, str_res, gs_res, np_res, steps_res)

    if task_selector == "Restyling - Stable Diffusion XL":
        from restyling_sdxl import restyling_sdxl_gradio
        return restyling_sdxl_gradio(input_image, text_input_restyling, str_res, gs_res, np_res, steps_res)

    if task_selector == "Restyling - Kandinsky v2.2":
        from restyling_kandinsky import restyling_kandinsky_gradio
        return restyling_kandinsky_gradio(input_image, text_input_restyling, str_res, gs_res, np_res, steps_res)

    if task_selector == "Superresolution - LDM x4 OpenImages":
        from superres_ldm import superres_gradio
        return superres_gradio(input_image, steps_super)

    if task_selector == "Superresolution - Stability AI x4 Upscaler":
        from superres_upscaler import superres_upscaler_gradio
        return superres_upscaler_gradio(input_image, steps_super)

    if task_selector == "LDM Removal Pipeline":
        from eraser_ldm_pipe import ldm_removal_pipe_gradio
        return ldm_removal_pipe_gradio(input_image, coord_input, ddim_steps_pipe)

    if task_selector in ["Stable Diffusion v1.5 Inpainting Pipeline", "Stable Diffusion XL Inpainting Pipeline", "Kandinsky v2.2 Inpainting Pipeline"]:
        from inpaint_pipe import inpaint_pipe_gradio
        return inpaint_pipe_gradio(task_selector, input_image, coord_input, text_input_inpaint_pipe, np_inpaint, steps_inpaint)

    if task_selector == "Stable Diffusion with ControlNet Inpainting Pipeline":
        from inpaint_sd_controlnet_pipe import inpaint_func_pipe_gradio
        return inpaint_func_pipe_gradio(input_image, coord_input, text_input_inpaint_pipe, np_inpaint, steps_inpaint)

    if task_selector == "Portrait Mode - Depth Anything":
        from blur_image import portrait_gradio
        return portrait_gradio(input_image, blur, sharpen)

    if task_selector == "Outpainting - Stable Diffusion":
        from outpaint_sd import outpaint_sd_gradio
        return outpaint_sd_gradio(input_image, prompt_outpaint, e_l, e_r, e_u, e_d, steps_outpaint)

    if task_selector == "Outpainting - Stable Diffusion XL":
        from outpaint_sdxl import outpaint_sdxl_gradio
        return outpaint_sdxl_gradio(input_image, prompt_outpaint, e_l, e_r, e_u, e_d, steps_outpaint)

    if task_selector == "Background Replacement - Stable Diffusion":
        from background_replace_sd import background_replace_sd_gradio
        return background_replace_sd_gradio(input_image, prompt_background, steps_br)

    if task_selector == "Background Replacement - Stable Diffusion XL":
        from background_replace_sdxl import background_replace_sdxl_gradio
        return background_replace_sdxl_gradio(input_image, prompt_background, steps_br)

    if task_selector in ["Stable Diffusion v1.5 Txt2Img", "Stable Diffusion XL Txt2Img", "Kandinsky v2.2 Txt2Img"]:
        from txt2img_generation import txt2img_gradio
        return txt2img_gradio(input_image, task_selector, prompt_txt2img, np_txt2img, gs_txt2img, steps_txt2img)

    if task_selector == "Eraser - LaMa":
        from eraser_lama import eraser_lama_gradio
        return eraser_lama_gradio(input_image, mask_image)


selected_points = []


# Collect clicked points, echo them as an "x1,y1; x2,y2" string, and draw
# red markers on a copy of the input image.
def input_handler(evt: gr.SelectData, input_image):
    global selected_points
    coords = evt.index
    x, y = coords[0], coords[1]
    selected_points.append([x, y])
    coord_string = '; '.join([f"{pt[0]},{pt[1]}" for pt in selected_points])

    image_with_points = input_image.copy()
    draw = ImageDraw.Draw(image_with_points)
    for point in selected_points:
        draw.ellipse((point[0] - 2, point[1] - 2, point[0] + 2, point[1] + 2), fill="red", outline="red")

    return coord_string, image_with_points


def reset_selected_points(input_image):
    global selected_points
    selected_points = []
    print("Selected points have been reset.")
    return "", input_image


def reload_image(original_image_path):
    return original_image_path


def update_task_selector(task_selector, task):
    # The first argument is the current state; it is intentionally ignored.
    return task


def reload_image_with_output(output_image):
    return output_image


def reload_mask(output_image):
    return output_image


title = "# AFM Image-Editing App"

if __name__ == "__main__":
    block = gr.Blocks(theme='shivi/calm_seafoam')

    with block:
        gr.Markdown(title)
        gr.Markdown(
            """
            WARNING: To run this app on GPU, please follow the instructions on the repo: https://github.com/matt576/image-editing
            Welcome to the AFM Image-Editing App!
            First, upload an input image or generate it via Txt2Img below.
            Then, choose the desired task by navigating the tabs.
            Finally, choose the model from the dropdown within each tab and click 'Generate'. Enjoy the app!
            """)

        original_image_path = "scott.png"  # Select input image path here
        # original_image_path = "outputs/txt2img/generated_input.png"  # for a txt2img-generated input image
        input_mask_path = "scott.png"  # Optional; make sure it matches the input image
        original_image = Image.open(original_image_path)

        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Input Image", sources='upload', type="pil", value=original_image_path, interactive=True)
            with gr.Column():
                output_image = gr.Image(label="Generated Image", type="pil")

        with gr.Row():
            generate_button = gr.Button("Generate!")

        with gr.Row():
            with gr.Column():
                gr.Markdown("Type image coordinates manually or click on the image directly:")
                coord_input = gr.Textbox(label="Pixel Coordinates (x,y), Format x1,y1; x2,y2 ...", value="")
                reset_button = gr.Button("Reset coordinates")
                reload_image_button = gr.Button("Clear Image")
                reload_output_button = gr.Button("Load Output")
                task_selector = gr.State(value="")

                with gr.Accordion("Txt2Img Generation (Optional)", open=False):
                    tab_task_selector_11 = gr.Dropdown(["Stable Diffusion v1.5 Txt2Img",
                                                        "Stable Diffusion XL Txt2Img",
                                                        "Kandinsky v2.2 Txt2Img"], label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    Use this feature if you wish to generate your own input image.
                    After generation, simply uncomment the original_image_path line in the Gradio script and relaunch the app!
                    Required Inputs: Text Prompt, Negative Prompt, Guidance Scale, Number of Inference Steps
                    Example prompt: "Photorealistic Gotham City night skyline, rain pouring down, dark clouds with streaks of lightning."
                    Example negative prompt: "poor details, poor quality, blurry, deformed, extra limbs"
                    """)
                    prompt_txt2img = gr.Textbox(label="Text Prompt: ", value="Photorealistic Gotham City night skyline, Batman standing on top of skyscraper, close shot, unreal engine, cinematic, rain pouring down, dark clouds with streaks of lightning")
                    np_txt2img = gr.Textbox(label="Negative Prompt", value="poor details, poor quality, blurry, deformed, extra limbs")
                    gs_txt2img = gr.Slider(minimum=0.0, maximum=50.0, label="Guidance Scale", value=7.5)
                    steps_txt2img = gr.Slider(minimum=5, maximum=200, label="Number of inference steps", value=30, step=1)

                with gr.Accordion("Mask Input Tasks (Optional)", open=False):
                    gr.Markdown("""
                    Here is the mask uploaded directly from the Gradio script. If you wish to change it,
                    use the Mask Generation Preview tab and click the 'Load Preview Mask' button.
                    """)
                    mask_image = gr.Image(label="Input Mask (Optional)", sources='upload', type="pil", value=input_mask_path)

                with gr.Tab("Inpainting - Object Replacement"):
                    tab_task_selector_2 = gr.Dropdown(["Stable Diffusion with ControlNet Inpainting",
                                                       "Stable Diffusion v1.5 Inpainting",
                                                       "Stable Diffusion XL Inpainting",
                                                       "Kandinsky v2.2 Inpainting"],
                                                      label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    All models in this section work with the given uploaded input mask.
                    Required Inputs: Input Mask (upload), Text Prompt (object to replace the masked area).
                    Enter in the text box below the desired object to be inpainted in place of the masked area.
                    Example prompt: "astronaut, white suit, 8k, extremely detailed, ornate, cinematic lighting, vivid, photorealistic, detailed, high quality"
                    """)
                    text_input_x = gr.Textbox(label="Text Prompt: ", value="astronaut, white suit, 8k, extremely detailed, ornate, cinematic lighting, vivid, photorealistic, detailed, high quality")
                    steps_inp = gr.Slider(minimum=5, maximum=200, label="Number of inference steps: ", value=50, step=1)

                with gr.Tab("Object Removal"):
                    tab_task_selector_3 = gr.Dropdown(["Object Removal LDM", "Eraser - LaMa"], label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    - **Object Removal LDM**:
                    Required inputs: Input Image, Input Mask (Upload or from Preview), DDIM Steps
                    Given the uploaded mask below, simply adjust the slider below according to the desired number of iterations.
                    - **Eraser - LaMa**:
                    Required inputs: Input Image, Input Mask (Upload or from Preview)
                    Please note: due to compatibility issues between the LaMa model and our Gradio app, the output visualization will not
                    work in the app, but your output will be saved to: code/outputs/untracked/eraser-lama.
                    """)
                    ddim_steps = gr.Slider(minimum=5, maximum=250, label="Number of DDIM sampling steps for object removal LDM", value=150, step=1)

            with gr.Column():
                with gr.Tab("Mask Generation Preview"):
                    tab_task_selector_1 = gr.Dropdown(["SAM", "GroundedSAM"], label="Select Model")
                    reload_mask_button = gr.Button("Load Preview Mask")
                    gr.Markdown("""
                    ### Instructions
                    - **SAM**:
                    Required inputs: Input Image, Pixel Coordinates, (Optional) Dilation
                    Type image coordinates manually or click on the image directly. Finally, simply click on the 'Generate' button.
                    """)
                    dilation_bool = gr.Dropdown(["Yes", "No"], label="Use dilation (recommended for inpainting)")
                    dilation_value = gr.Slider(minimum=0, maximum=50, label="Dilation value (recommended: 10) ", value=10, step=1)
                    gr.Markdown("""
                    - **GroundedSAM (GroundingDINO + SAM)**:
                    Required Inputs: Text Prompt [object(s) to be detected], (Optional) Dilation
                    Enter in the text box below the object(s) in the input image for which the masks are to be generated.
                    """)
                    text_input = gr.Textbox(label="Text Prompt: ", value="dog")

                with gr.Tab("Restyling"):
                    tab_task_selector_4 = gr.Dropdown(["Restyling - Stable Diffusion v1.5",
                                                       "Restyling - Stable Diffusion XL",
                                                       "Restyling - Kandinsky v2.2"], label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    Required Inputs: Input Image, Text Prompt, Strength, Guidance Scale, Negative Prompt, Number of Inference Steps
                    Example Text Prompt: "Photorealistic Gotham City night skyline, rain pouring down, dark clouds with streaks of lightning."
                    Example Negative Prompt: "poor details, poor quality, blurry, deformed, extra limbs"
                    """)
                    text_input_restyling = gr.Textbox(label="Text Prompt: ", value="Futuristic night city from Cyberpunk 2077, rainy night, close shot, 35 mm, realism, octane render, 8 k, exploration, cinematic, pixbay, modernist, realistic, unreal engine, hyper detailed, photorealistic, maximum detail, volumetric light, moody cinematic epic concept art, vivid")
                    str_res = gr.Slider(minimum=0.1, maximum=1.0, label="Strength: ", value=0.75, step=0.01)
                    gs_res = gr.Slider(minimum=0.0, maximum=50.0, label="Guidance Scale: ", value=7.5, step=0.1)
                    np_res = gr.Textbox(label="Negative Prompt: ", value="poor details, poor quality, blurry, deformed, extra limbs")
                    steps_res = gr.Slider(minimum=5, maximum=150, label="Number of inference steps: ", value=30, step=1)

                with gr.Tab("Superresolution"):
                    tab_task_selector_5 = gr.Dropdown(["Superresolution - LDM x4 OpenImages",
                                                       "Superresolution - Stability AI x4 Upscaler"], label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    Required Inputs: Input Image, Number of Inference Steps
                    Select the model from the dropdown menu, set the number of inference steps, and click the 'Generate' button to get your new image.
                    """)
                    steps_super = gr.Slider(minimum=5, maximum=150, label="Number of inference steps: ", value=30, step=1)

                with gr.Tab("Pipeline: Inpainting - Object Replacement"):
                    tab_task_selector_6 = gr.Dropdown(["GroundedSAM Inpainting",
                                                       "Stable Diffusion with ControlNet Inpainting Pipeline",
                                                       "Stable Diffusion v1.5 Inpainting Pipeline",
                                                       "Stable Diffusion XL Inpainting Pipeline",
                                                       "Kandinsky v2.2 Inpainting Pipeline"], label="Select Model")
                    gr.Markdown("""
                    - **GroundedSAM Inpainting (GroundingDINO + SAM + Stable Diffusion)**:
                    Required Inputs: Input Image, Detection Prompt, Inpainting Prompt
                    Enter in the text box below the object(s) in the input image for which the masks are to be generated.
                    Example detection prompt: "dog"
                    Example inpaint prompt: "white tiger, photorealistic, detailed, high quality"
                    """)
                    text_input_gsam = gr.Textbox(label="Detection Prompt: ", value="dog")
                    inpaint_input_gsam = gr.Textbox(label="Inpainting Prompt: ", value="astronaut, white suit, 8k, extremely detailed, ornate, cinematic lighting, vivid, photorealistic, detailed, high quality")
                    gr.Markdown("""
                    - **Kandinsky v2.2 / Stable Diffusion v1.5 / SDXL / SD + ControlNet**:
                    Required Inputs: Input Image, Pixel Coordinates, Inpainting Prompt
                    Enter in the text box below the object(s) in the input image for which the masks are to be generated.
                    Example Text Prompt: "white tiger, photorealistic, detailed, high quality"
                    Example Negative Prompt: "poor details, poor quality, blurry, deformed, extra limbs"
                    """)
                    text_input_inpaint_pipe = gr.Textbox(label="Text Prompt: ", value="astronaut, white suit, 8k, extremely detailed, ornate, cinematic lighting, vivid, photorealistic, detailed, high quality")
                    np_inpaint = gr.Textbox(label="Negative Prompt: ", value="poor details, poor quality, blurry, deformed, extra limbs")
                    steps_inpaint = gr.Slider(minimum=5, maximum=200, label="Number of inference steps: ", value=150, step=1)

                with gr.Tab("Pipeline - Object Removal"):
                    tab_task_selector_7 = gr.Dropdown(["LDM Removal Pipeline", " "], label="Select Model")
                    gr.Markdown("""
                    ### Instructions
                    - **LDM Removal Pipeline**:
                    Required inputs: Input Image, Pixel Coordinates, DDIM Steps
                    If you wish to view the mask before the final output, go to the 'Mask Generation Preview' tab.
                    Type the image coordinates manually in the box under the image or click on the image directly.
                    For a more detailed mask of a specific object or part of it, select multiple points.
                    Finally, choose the number of DDIM steps and simply click on the 'Generate' button.
                    """)
                    ddim_steps_pipe = gr.Slider(minimum=5, maximum=250, label="Number of DDIM sampling steps for object removal", value=150, step=1)

                with gr.Tab("Background Blurring"):
                    tab_task_selector_8 = gr.Dropdown(["Portrait Mode - Depth Anything"], label='Select Model')
                    gr.Markdown("""
                    ### Instructions
                    - **Portrait Mode - Depth Anything**:
                    Required inputs: Input Image, Box Blur, Sharpen
                    Recommended blur value range: 2-25
                    Recommended sharpen value range: 0-5
                    Adjust the required inputs with the sliders below:
                    """)
                    blur = gr.Slider(minimum=0, maximum=50, label="Box Blur value", value=5, step=1)
                    sharpen = gr.Slider(minimum=0, maximum=7, label="Sharpen Parameter", value=0, step=1)

                with gr.Tab("Outpainting"):
                    tab_task_selector_9 = gr.Dropdown(["Outpainting - Stable Diffusion", "Outpainting - Stable Diffusion XL"], label='Select Model')
                    gr.Markdown("""
                    ### Instructions
                    - **Outpainting - Stable Diffusion**:
                    Required inputs: Input Image, Text Prompt, extend left/right/up/down, steps
                    Choose how much and in which direction you want to extend/outpaint your image, and specify a text prompt.
                    Example prompt: "open plan, kitchen and living room, black umbrella on the floor, modular furniture with cotton textiles, wooden floor, high ceiling, large steel windows viewing a city"
                    """)
                    prompt_outpaint = gr.Textbox(label="Text Prompt: ", value="open plan, kitchen and living room, black umbrella on the floor, modular furniture with cotton textiles, wooden floor, high ceiling, large steel windows viewing a city")
                    e_l = gr.Slider(minimum=0, maximum=1000, label="Extend Left", value=200, step=1)
                    e_r = gr.Slider(minimum=0, maximum=1000, label="Extend Right", value=200, step=1)
                    e_u = gr.Slider(minimum=0, maximum=1000, label="Extend Up", value=200, step=1)
                    e_d = gr.Slider(minimum=0, maximum=1000, label="Extend Down", value=200, step=1)
                    steps_outpaint = gr.Slider(minimum=0, maximum=200, label="Number of Steps", value=50, step=1)

                with gr.Tab("Background Replacement"):
                    tab_task_selector_10 = gr.Dropdown(["Background Replacement - Stable Diffusion", "Background Replacement - Stable Diffusion XL"], label='Select Model')
                    gr.Markdown("""
                    ### Instructions
                    - **Background Replacement - Stable Diffusion**:
                    Required inputs: Input Image, Text Prompt, steps
                    Specify the new background in the text box below.
                    Example prompt: "dog sitting on the beach, sunny day, blue sky"
                    """)
                    prompt_background = gr.Textbox(label="Text Prompt: ", value="dog sitting on the beach, sunny day, blue sky, cinematic, pixbay, modernist, realistic, unreal engine, hyper detailed, photorealistic, maximum detail, volumetric light, moody cinematic epic concept art, vivid")
                    steps_br = gr.Slider(minimum=0, maximum=200, label="Number of Steps", value=30, step=1)

        input_image.select(input_handler, inputs=[input_image], outputs=[coord_input, input_image])

        generate_button.click(
            fn=run_afm_app,
            inputs=[task_selector, input_image, mask_image, text_input, text_input_x, text_input_gsam, coord_input, ddim_steps, ddim_steps_pipe,
                    inpaint_input_gsam, text_input_inpaint_pipe, text_input_restyling, blur, sharpen, prompt_outpaint, e_l, e_r, e_u, e_d, steps_outpaint,
                    prompt_background, steps_br, str_res, gs_res, np_res, steps_res, np_inpaint, steps_inpaint, prompt_txt2img, np_txt2img, gs_txt2img,
                    steps_txt2img, steps_super, dilation_bool, dilation_value, steps_inp],
            outputs=output_image
        )

        reset_button.click(
            fn=reset_selected_points,
            inputs=[input_image],
            outputs=[coord_input, input_image]
        )

        reload_image_button.click(
            fn=reload_image,
            inputs=[gr.State(original_image_path)],
            outputs=[input_image]
        )

        reload_output_button.click(
            fn=reload_image_with_output,
            inputs=[output_image],
            outputs=[input_image]
        )

        reload_mask_button.click(
            fn=reload_mask,
            inputs=[output_image],
            outputs=[mask_image]
        )

        tab_task_selector_1.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_1], outputs=[task_selector])
        tab_task_selector_2.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_2], outputs=[task_selector])
        tab_task_selector_3.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_3], outputs=[task_selector])
        tab_task_selector_4.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_4], outputs=[task_selector])
        tab_task_selector_5.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_5], outputs=[task_selector])
        tab_task_selector_6.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_6], outputs=[task_selector])
        tab_task_selector_7.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_7], outputs=[task_selector])
        tab_task_selector_8.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_8], outputs=[task_selector])
        tab_task_selector_9.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_9], outputs=[task_selector])
        tab_task_selector_10.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_10], outputs=[task_selector])
        tab_task_selector_11.change(fn=update_task_selector, inputs=[task_selector, tab_task_selector_11], outputs=[task_selector])

    block.launch(share=True)
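The wiring at the end of app.py funnels every tab's dropdown into one shared gr.State via update_task_selector, so the single Generate button always dispatches the most recently chosen model. A stripped-down sketch of that routing pattern under Gradio 4.x (component names here are illustrative, not from this commit):

import gradio as gr


def set_task(_current, choice):
    # Mirror of update_task_selector: ignore the old state, store the new choice.
    return choice


with gr.Blocks() as demo:
    task = gr.State(value="")
    with gr.Tab("Tab A"):
        dd_a = gr.Dropdown(["Model A1", "Model A2"], label="Select Model")
    with gr.Tab("Tab B"):
        dd_b = gr.Dropdown(["Model B1"], label="Select Model")
    out = gr.Textbox(label="Task that Generate would run")
    btn = gr.Button("Generate")

    # Each dropdown writes into the one shared State, exactly as in app.py.
    dd_a.change(set_task, inputs=[task, dd_a], outputs=[task])
    dd_b.change(set_task, inputs=[task, dd_b], outputs=[task])
    btn.click(lambda t: t, inputs=[task], outputs=[out])

demo.launch()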
scott.png ADDED