Vignesh455 committed on
Commit
0cddddf
·
verified ·
1 Parent(s): 7cbef73

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -4,7 +4,7 @@ import torch
4
  from diffusers import AutoPipelineForInpainting, UNet2DConditionModel
5
  import diffusers
6
  from share_btn import community_icon_html, loading_icon_html, share_js
7
- from PIL import Image
8
 
9
  pipe = AutoPipelineForInpainting.from_pretrained("SG161222/Realistic_Vision_V5.0_noVAE").to("cuda")
10
 
@@ -17,7 +17,6 @@ def read_content(file_path: str) -> str:
17
  return content
18
 
19
  def predict(dict, prompt="", negative_prompt="", guidance_scale=7.5, steps=20, strength=1.0, scheduler="EulerDiscreteScheduler"):
20
- original_width, original_height = Image.open(dict["image"]).shape[1:]
21
  if negative_prompt == "":
22
  negative_prompt = None
23
  scheduler_class_name = scheduler.split("-")[0]
@@ -36,7 +35,7 @@ def predict(dict, prompt="", negative_prompt="", guidance_scale=7.5, steps=20, s
36
 
37
  output = pipe(prompt = prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask, guidance_scale=guidance_scale, num_inference_steps=int(steps), strength=strength, clip_skip=1)
38
  output_image = output.images[0] # Assuming the output dictionary has an 'images' key
39
- output_image = F.interpolate(output_image.unsqueeze(0), size=(original_height, original_width))[0]
40
  return output.images, gr.update(visible=True)
41
 
42
 
 
4
  from diffusers import AutoPipelineForInpainting, UNet2DConditionModel
5
  import diffusers
6
  from share_btn import community_icon_html, loading_icon_html, share_js
7
+
8
 
9
  pipe = AutoPipelineForInpainting.from_pretrained("SG161222/Realistic_Vision_V5.0_noVAE").to("cuda")
10
 
 
17
  return content
18
 
19
  def predict(dict, prompt="", negative_prompt="", guidance_scale=7.5, steps=20, strength=1.0, scheduler="EulerDiscreteScheduler"):
 
20
  if negative_prompt == "":
21
  negative_prompt = None
22
  scheduler_class_name = scheduler.split("-")[0]
 
35
 
36
  output = pipe(prompt = prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask, guidance_scale=guidance_scale, num_inference_steps=int(steps), strength=strength, clip_skip=1)
37
  output_image = output.images[0] # Assuming the output dictionary has an 'images' key
38
+ output_image = F.interpolate(output_image.unsqueeze(0), size=(1024,832))[0]
39
  return output.images, gr.update(visible=True)
40
 
41