coralLight committed on
Commit
1ee92d4
·
1 Parent(s): f202d56

add inference

Browse files
__pycache__/NoiseTransformer.cpython-39.pyc ADDED
Binary file (1.45 kB). View file
 
__pycache__/SVDNoiseUnet.cpython-39.pyc ADDED
Binary file (11.2 kB). View file
 
__pycache__/customed_unipc_scheduler.cpython-39.pyc ADDED
Binary file (28.8 kB). View file
 
__pycache__/dpm_solver_v3.cpython-39.pyc ADDED
Binary file (32.2 kB). View file
 
__pycache__/free_lunch_utils.cpython-39.pyc ADDED
Binary file (7.78 kB). View file
 
__pycache__/sampler.cpython-39.pyc ADDED
Binary file (7.12 kB). View file
 
__pycache__/uni_pc.cpython-39.pyc ADDED
Binary file (18.4 kB). View file
 
app.py CHANGED
@@ -167,7 +167,8 @@ def generate_image_with_steps(prompt, negative_prompt, seed, width, height, guid
167
  """Helper function to generate image with specific number of steps"""
168
  scheduler = CustomedUniPCMultistepScheduler.from_config(pipe.scheduler.config
169
  , solver_order = 2 if num_inference_steps==8 else 1
170
- ,denoise_to_zero = False)
 
171
  pipe.scheduler = scheduler
172
  pipe.to('cuda')
173
  with torch.no_grad():
 
167
  """Helper function to generate image with specific number of steps"""
168
  scheduler = CustomedUniPCMultistepScheduler.from_config(pipe.scheduler.config
169
  , solver_order = 2 if num_inference_steps==8 else 1
170
+ ,denoise_to_zero = False
171
+ , use_afs=True)
172
  pipe.scheduler = scheduler
173
  pipe.to('cuda')
174
  with torch.no_grad():
customed_unipc_scheduler.py CHANGED
@@ -215,6 +215,7 @@ class CustomedUniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
215
  skip_type: str = "customed_time_karras",
216
  denoise_to_zero: bool = False,
217
  rescale_betas_zero_snr: bool = False,
 
218
  ):
219
 
220
  if self.config.use_beta_sigmas and not is_scipy_available():
@@ -237,6 +238,7 @@ class CustomedUniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
237
  raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
238
 
239
  self.skip_type = skip_type
 
240
  self.denoise_to_zero = denoise_to_zero
241
  if rescale_betas_zero_snr:
242
  self.betas = rescale_zero_terminal_snr(self.betas)
@@ -362,10 +364,14 @@ class CustomedUniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
362
  if self.denoise_to_zero:
363
  ct_real_end = self._sigma_to_t(sigmas[-1], log_sigmas)
364
  timesteps = self.get_sigmas_karras(8, ct_end, ct_start,rho=1.2, customed_final_sigma= ct_real_end if self.denoise_to_zero else None)
365
- timesteps_tmp = copy.deepcopy(timesteps)
366
- timesteps_tmp = np.append(timesteps_tmp, self._sigma_to_t(sigmas[-1], log_sigmas))
367
- sigmas = np.array([self._t_to_sigma(t, log_sigmas) for t in timesteps_tmp])
368
 
 
 
 
 
 
 
 
369
  self.sigmas = torch.from_numpy(sigmas)
370
  self.timesteps = torch.from_numpy(timesteps).to(device=device)
371
 
@@ -444,7 +450,7 @@ class CustomedUniPCMultistepScheduler(SchedulerMixin, ConfigMixin):
444
 
445
  def _t_to_sigma(self, t, log_sigmas):
446
  # t = t
447
- low_idx, high_idx, w = np.int64(np.floor(t)), np.int64(np.ceil(t)), t - np.floor(t)
448
  log_sigma = (1 - w) * log_sigmas[low_idx] + w * log_sigmas[high_idx]
449
  return np.exp(log_sigma)
450
 
 
215
  skip_type: str = "customed_time_karras",
216
  denoise_to_zero: bool = False,
217
  rescale_betas_zero_snr: bool = False,
218
+ use_afs: bool = False
219
  ):
220
 
221
  if self.config.use_beta_sigmas and not is_scipy_available():
 
238
  raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
239
 
240
  self.skip_type = skip_type
241
+ self.use_afs = use_afs
242
  self.denoise_to_zero = denoise_to_zero
243
  if rescale_betas_zero_snr:
244
  self.betas = rescale_zero_terminal_snr(self.betas)
 
364
  if self.denoise_to_zero:
365
  ct_real_end = self._sigma_to_t(sigmas[-1], log_sigmas)
366
  timesteps = self.get_sigmas_karras(8, ct_end, ct_start,rho=1.2, customed_final_sigma= ct_real_end if self.denoise_to_zero else None)
 
 
 
367
 
368
+ if self.use_afs:
369
+ np.insert(timesteps,1,(timesteps[0]+timesteps[1]) / 2)
370
+
371
+
372
+ timesteps_tmp = copy.deepcopy(timesteps)
373
+ timesteps_tmp = np.append(timesteps_tmp, self._sigma_to_t(sigmas[-1], log_sigmas))
374
+ sigmas = np.array([self._t_to_sigma(t, log_sigmas) for t in timesteps_tmp])
375
  self.sigmas = torch.from_numpy(sigmas)
376
  self.timesteps = torch.from_numpy(timesteps).to(device=device)
377
 
 
450
 
451
  def _t_to_sigma(self, t, log_sigmas):
452
  # t = t
453
+ low_idx, high_idx, w = np.int64(np.floor(t)), np.clip(np.int64(np.ceil(t)),a_min=0,a_max=999) , t - np.floor(t)
454
  log_sigma = (1 - w) * log_sigmas[low_idx] + w * log_sigmas[high_idx]
455
  return np.exp(log_sigma)
456