Alexander Bagus committed
Commit efce06c · 1 Parent(s): e6836fc
Files changed (1)
  1. app.py +13 -12
app.py CHANGED
@@ -113,7 +113,7 @@ def inference(
     num_inference_steps=8,
     progress=gr.Progress(track_tqdm=True),
 ):
-    # guidance_scale=2
+    # guidance_scale=1
     timestamp = time.time()
     print(f"timestamp: {timestamp}")
 
@@ -125,9 +125,10 @@ def inference(
 
     upscale_target = 2
     upscale_nearest = 16
+    upscale_max_size = 1440
     # rescale to prevent OOM
     input_image = edit_dict['background']
-    input_image, width, height = image_utils.rescale_image(input_image, upscale_target, upscale_nearest, max_size=1280)
+    input_image, width, height = image_utils.rescale_image(input_image, upscale_target, upscale_nearest, max_size=upscale_max_size)
     sample_size = [height, width]
 
     print("DEBUG: inpaint_image")
@@ -138,18 +139,18 @@ def inference(
 
     print("DEBUG: mask_image")
     if mask_image is not None:
-        mask_image, w, h = image_utils.rescale_image(mask_image, upscale_target, upscale_nearest, max_size=1280)
+        mask_image, w, h = image_utils.rescale_image(mask_image, upscale_target, upscale_nearest, max_size=upscale_max_size)
         mask_image = get_image_latent(mask_image, sample_size=sample_size)[:, :1, 0]
     else:
         mask_image = torch.ones([1, 1, sample_size[0], sample_size[1]]) * 255
 
-    # print("DEBUG: control_image_torch")
-    # processor = Processor('canny')
-    # control_image, w, h = image_utils.rescale_image(input_image, scale_target, upscale_nearest, max_size=1280)
-    # control_image = control_image.resize((1024, 1024))
-    # control_image = processor(control_image, to_pil=True)
-    # control_image = control_image.resize((width, height))
-    # control_image_torch = get_image_latent(control_image, sample_size=sample_size)[:, :, 0]
+    print("DEBUG: control_image_torch")
+    processor = Processor('openpose_full')
+    control_image, w, h = image_utils.rescale_image(input_image, upscale_target, upscale_nearest, max_size=1280)
+    control_image = control_image.resize((1024, 1024))
+    control_image = processor(control_image, to_pil=True)
+    control_image = control_image.resize((width, height))
+    control_image_torch = get_image_latent(control_image, sample_size=sample_size)[:, :, 0]
 
     # generation
     if randomize_seed: seed = random.randint(0, MAX_SEED)
@@ -164,7 +165,7 @@ def inference(
         guidance_scale=guidance_scale,
         image = inpaint_image,
         mask_image = mask_image,
-        # control_image=control_image_torch,
+        control_image=control_image_torch,
         num_inference_steps=num_inference_steps,
         control_context_scale=control_context_scale,
     ).images[0]
@@ -240,7 +241,7 @@ with gr.Blocks() as demo:
         guidance_scale = gr.Slider(
             label="Guidance scale",
             minimum=0.0,
-            maximum=3.0,
+            maximum=1.0,
             step=0.1,
             value=1.0,
         )
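
The commit replaces the hard-coded max_size=1280 with a shared upscale_max_size = 1440 for both the background and the mask rescale. image_utils.rescale_image is an app-local helper whose body is outside this diff; below is a plausible sketch inferred only from the call sites (the (image, width, height) return, the max_size cap against OOM, and the upscale_nearest=16 snap that latent models need), with upscale_target kept in the signature although its role is not visible here:

from PIL import Image

def rescale_image(image, upscale_target, upscale_nearest, max_size=1440):
    # Hypothetical stand-in for image_utils.rescale_image -- inferred from
    # the call sites in app.py, not the real implementation.
    width, height = image.size
    scale = min(1.0, max_size / max(width, height))  # shrink only, to avoid OOM
    # snap both dimensions down to a multiple of upscale_nearest (e.g. 16)
    width = max(upscale_nearest, int(width * scale) // upscale_nearest * upscale_nearest)
    height = max(upscale_nearest, int(height * scale) // upscale_nearest * upscale_nearest)
    return image.resize((width, height), Image.LANCZOS), width, height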
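get_image_latent is likewise app-local. The [:, :1, 0] slice for the mask and [:, :, 0] for the control image suggest a video-style [batch, channels, frames, height, width] return with a single frame; that layout is an assumption, but it is consistent with both slices. A stand-in that satisfies them:

import numpy as np
import torch

def get_image_latent(image, sample_size):
    # Hypothetical stand-in returning [B, C, F, H, W] with one frame, so that
    # [:, :1, 0] yields a [1, 1, H, W] mask and [:, :, 0] a [1, C, H, W] image.
    image = image.convert("RGB").resize((sample_size[1], sample_size[0]))  # PIL wants (W, H)
    tensor = torch.from_numpy(np.array(image)).float().permute(2, 0, 1)    # [C, H, W]
    return tensor[None, :, None]  # [1, C, 1, H, W]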
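The re-enabled control branch swaps the previously commented-out 'canny' processor for 'openpose_full'. Processor is presumably controlnet_aux.processor.Processor, which selects an annotator by string id and returns the rendered control map; 'openpose_full' adds hand and face keypoints on top of the body skeleton. A standalone version of that branch (the file path is a placeholder):

from PIL import Image
from controlnet_aux.processor import Processor

input_image = Image.open("background.png").convert("RGB")  # placeholder input
width, height = input_image.size

processor = Processor('openpose_full')              # body + hands + face keypoints
control_image = input_image.resize((1024, 1024))    # run the annotator at a fixed size
control_image = processor(control_image, to_pil=True)
control_image = control_image.resize((width, height))  # back to the sampling size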
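The remaining two hunks lower the guidance ceiling: the comment flips from guidance_scale=2 to guidance_scale=1, and the slider's maximum drops from 3.0 to 1.0 while value=1.0 stays the default. Together with num_inference_steps=8 this is consistent with a few-step or distilled sampler, where guidance much above 1 tends to degrade output; that reading is an inference, not stated in the commit.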