KheemDH committed on
Commit
ca2d463
·
1 Parent(s): 166fda9

Add Gradio interface and pipeline for inpainting

Browse files
Files changed (2) hide show
  1. app.py +46 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from PIL import Image
3
+ from diffusers import AutoPipelineForInpainting, AutoencoderKL
4
+ import torch
5
+
6
# Load models.
# The fp16-fixed VAE avoids NaN/black-image artifacts that the stock SDXL VAE
# produces when run in float16.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipeline = AutoPipelineForInpainting.from_pretrained(
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
    vae=vae,
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")
# Bug fix: inpaint() below passes ip_adapter_image= to the pipeline, which
# raises a ValueError unless an IP-Adapter has been loaded first.
pipeline.load_ip_adapter(
    "h94/IP-Adapter",
    subfolder="sdxl_models",
    weight_name="ip-adapter_sdxl.safetensors",
)
13
+
14
# Inference entry point wired into the Gradio interface below.
def inpaint(prompt, image, mask_image, ip_image):
    """Run SDXL inpainting on *image* under *mask_image*, guided by *prompt*
    and the IP-Adapter reference *ip_image*; return the first generated image.
    """

    def _prepare(img):
        # Normalize every input to RGB at the working resolution.
        return img.convert("RGB").resize((512, 512))

    image, mask_image, ip_image = (
        _prepare(img) for img in (image, mask_image, ip_image)
    )

    output = pipeline(
        prompt=prompt,
        negative_prompt="ugly, bad quality, bad anatomy",
        image=image,
        mask_image=mask_image,
        ip_adapter_image=ip_image,
        strength=0.99,
        guidance_scale=8.0,
        num_inference_steps=100,
    )
    return output.images[0]
31
+
32
# Set up the Gradio interface.
prompt_box = gr.Textbox(label="Prompt", placeholder="Enter the prompt for the model")
input_image = gr.Image(type="pil", label="Input Image")
mask_input = gr.Image(type="pil", label="Mask Image")
adapter_input = gr.Image(type="pil", label="IP Adapter Image")

demo = gr.Interface(
    fn=inpaint,
    inputs=[prompt_box, input_image, mask_input, adapter_input],
    outputs=gr.Image(type="pil"),
    title="Stable Diffusion Inpainting",
    description="A model for inpainting and image editing using Stable Diffusion XL.",
)

demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ diffusers
3
+ torch