Fix when latents is None
Browse files
animatediff/pipelines/pipeline_animation.py
CHANGED
|
@@ -332,8 +332,11 @@ class AnimationPipeline(DiffusionPipeline):
|
|
| 332 |
if init_latents is not None:
|
| 333 |
init_latents = init_latents.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1)
|
| 334 |
for i in range(video_length):
|
| 335 |
-
init_alpha = (video_length - float(i)) / video_length / 30
|
| 336 |
-
latents[:, :, i, :, :] = init_latents * init_alpha + latents[:, :, i, :, :] * (1 - init_alpha)
|
|
|
|
|
|
|
|
|
|
| 337 |
else:
|
| 338 |
latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)
|
| 339 |
|
|
@@ -349,6 +352,7 @@ class AnimationPipeline(DiffusionPipeline):
|
|
| 349 |
return latents
|
| 350 |
|
| 351 |
|
|
|
|
| 352 |
@torch.no_grad()
|
| 353 |
def __call__(
|
| 354 |
self,
|
|
|
|
| 332 |
if init_latents is not None:
|
| 333 |
init_latents = init_latents.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1)
|
| 334 |
for i in range(video_length):
|
| 335 |
+
if init_latents is None:
|
| 336 |
+
latents[:, :, i, :, :] = torch.randn(latents[:, :, i, :, :].shape, device=rand_device, dtype=dtype) * self.scheduler.init_noise_sigma
|
| 337 |
+
else:
|
| 338 |
+
init_alpha = (video_length - float(i)) / video_length / 30
|
| 339 |
+
latents[:, :, i, :, :] = init_latents * init_alpha + latents[:, :, i, :, :] * (1 - init_alpha)
|
| 340 |
else:
|
| 341 |
latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)
|
| 342 |
|
|
|
|
| 352 |
return latents
|
| 353 |
|
| 354 |
|
| 355 |
+
|
| 356 |
@torch.no_grad()
|
| 357 |
def __call__(
|
| 358 |
self,
|