coralLight committed on
Commit
7532355
·
1 Parent(s): 4255cce

add inference

Browse files
Files changed (1) hide show
  1. app.py +28 -26
app.py CHANGED
@@ -175,38 +175,40 @@ accelerator = accelerate.Accelerator()
175
 
176
  def generate_image_with_steps(prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps):
177
  """Helper function to generate image with specific number of steps"""
178
- prompts = [prompt]
179
- if num_inference_steps > 8:
180
- sampler = UniPCSampler(pipe,model_closure=model_closure, steps=num_inference_steps, guidance_scale=guidance_scale,skip_type='time_uniform')
181
- else:
182
- sampler = UniPCSampler(pipe,model_closure=model_closure, steps=num_inference_steps, guidance_scale=guidance_scale)
183
-
184
- c = prompts
185
- uc = ['(worst quality:2), (low quality:2), (normal quality:2), bad anatomy, bad proportions, poorly drawn face, poorly drawn hands, missing fingers, extra limbs, blurry, pixelated, distorted, lowres, jpeg artifacts, watermark, signature, text, (deformed:1.5), (bad hands:1.3), overexposed, underexposed, censored, mutated, extra fingers, cloned face, bad eyes'] * len(c) if guidance_scale != 1.0 else None
186
- shape = [4, width // 8, height // 8]
 
 
187
  # if opt.method == "dpm_solver_v3":
188
  # batch_size, shape, conditioning, x_T, unconditional_conditioning
189
- samples, _ = sampler.sample(
190
- conditioning=c,
191
- batch_size=1,
192
- shape=shape,
193
- unconditional_conditioning=uc,
194
- x_T=None,
195
- start_free_u_step=6 if num_inference_steps == 8 else 4,
196
- xl_preprocess_closure = prepare_sdxl_pipeline_step_parameter,
197
  # npnet = npn_net,
198
- use_corrector=True,
199
- )
200
 
201
- x_samples = pipe.vae.decode(samples / pipe.vae.config.scaling_factor).sample
202
- x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
203
- x_samples = x_samples.cpu().permute(0, 2, 3, 1).numpy()
204
 
205
- x_image_torch = torch.from_numpy(x_samples).permute(0, 3, 1, 2) # need to pay attention
206
 
207
- for x_sample in x_image_torch:
208
- x_sample = 255.0 * rearrange(x_sample.cpu().numpy(), "c h w -> h w c")
209
- img = Image.fromarray(x_sample.astype(np.uint8))
210
  return img
211
 
212
  @spaces.GPU #[uncomment to use ZeroGPU]
 
175
 
176
  def generate_image_with_steps(prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps):
177
  """Helper function to generate image with specific number of steps"""
178
+ with torch.no_grad():
179
+ with precision_scope("cuda"):
180
+ prompts = [prompt]
181
+ if num_inference_steps > 8:
182
+ sampler = UniPCSampler(pipe,model_closure=model_closure, steps=num_inference_steps, guidance_scale=guidance_scale,skip_type='time_uniform')
183
+ else:
184
+ sampler = UniPCSampler(pipe,model_closure=model_closure, steps=num_inference_steps, guidance_scale=guidance_scale)
185
+
186
+ c = prompts
187
+ uc = ['(worst quality:2), (low quality:2), (normal quality:2), bad anatomy, bad proportions, poorly drawn face, poorly drawn hands, missing fingers, extra limbs, blurry, pixelated, distorted, lowres, jpeg artifacts, watermark, signature, text, (deformed:1.5), (bad hands:1.3), overexposed, underexposed, censored, mutated, extra fingers, cloned face, bad eyes'] * len(c) if guidance_scale != 1.0 else None
188
+ shape = [4, width // 8, height // 8]
189
  # if opt.method == "dpm_solver_v3":
190
  # batch_size, shape, conditioning, x_T, unconditional_conditioning
191
+ samples, _ = sampler.sample(
192
+ conditioning=c,
193
+ batch_size=1,
194
+ shape=shape,
195
+ unconditional_conditioning=uc,
196
+ x_T=None,
197
+ start_free_u_step=6 if num_inference_steps == 8 else 4 if num_inference_steps < 8 else None,
198
+ xl_preprocess_closure = prepare_sdxl_pipeline_step_parameter,
199
  # npnet = npn_net,
200
+ use_corrector=True,
201
+ )
202
 
203
+ x_samples = pipe.vae.decode(samples / pipe.vae.config.scaling_factor).sample
204
+ x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
205
+ x_samples = x_samples.cpu().permute(0, 2, 3, 1).numpy()
206
 
207
+ x_image_torch = torch.from_numpy(x_samples).permute(0, 3, 1, 2) # need to pay attention
208
 
209
+ for x_sample in x_image_torch:
210
+ x_sample = 255.0 * rearrange(x_sample.cpu().numpy(), "c h w -> h w c")
211
+ img = Image.fromarray(x_sample.astype(np.uint8))
212
  return img
213
 
214
  @spaces.GPU #[uncomment to use ZeroGPU]